| source | python |
|---|---|
MovePolygonGUI.py
|
# This example shows how RoboDK and the Python tkinter GUI can display a graphical user interface to customize program generation according to certain parameters
# This example is an improvement of the weld Hexagon example
from robodk.robolink import * # API to communicate with RoboDK
from robodk.robomath import * # Robot toolbox
import threading
# Set up default parameters
PROGRAM_NAME = "DoWeld" # Name of the program
APPROACH = 300 # Approach distance
RADIUS = 200 # Radius of the polygon
SPEED_WELD = 50 # Speed in mm/s of the welding path
SPEED_MOVE = 200 # Speed in mm/s of the approach/retract movements
SIDES = 8 # Number of sides for the polygon
DRY_RUN = 1 # If 0, it will generate SprayOn/SprayOff program calls, otherwise it will not activate the tool
RUN_MODE = RUNMODE_SIMULATE # Simulation behavior (simulate, generate program or generate the program and send it to the robot)
# use RUNMODE_SIMULATE to simulate only
# use RUNMODE_MAKE_ROBOTPROG to generate the program
# use RUNMODE_MAKE_ROBOTPROG_AND_UPLOAD to generate the program and send it to the robot
# Main program
def RunProgram():
# Use default global variables
global PROGRAM_NAME
global APPROACH
global RADIUS
global SPEED_WELD
global SPEED_MOVE
global SIDES
global DRY_RUN
global RUN_MODE
# Any interaction with RoboDK must be done through RDK:
RDK = Robolink()
# Get the robot (first robot found):
robot = RDK.Item('', ITEM_TYPE_ROBOT)
    if not robot.Valid():
        RDK.ShowMessage("Robot is not valid or selected", False)
        return
# Get the reference target by name:
#target = RDK.Item('Target 1')
#target_pose = target.Pose()
#robot.MoveJ(target)
# get the robot as an item:
robot = RDK.Item('', ITEM_TYPE_ROBOT)
# impose the run mode
RDK.setRunMode(RUN_MODE)
# set the name of the generated program
RDK.ProgramStart(PROGRAM_NAME, "", "", robot)
# get the pose of the reference target (4x4 matrix representing position and orientation):
poseref = robot.Pose() # or target.Pose()
# move the robot to home, then to an approach position
robot.setSpeed(SPEED_MOVE)
robot.MoveJ(transl(0, 0, APPROACH) * poseref)
    # make a polygon of n SIDES around the reference target
for i in range(SIDES + 1):
        ang = i * 2 * pi / SIDES  # angle in radians of the i-th vertex
# Calculate next position
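        # rotz(ang) swings the point around the target's Z axis, transl(RADIUS, 0, 0)
        # pushes it out to the polygon radius, and rotz(-ang) undoes the rotation so
        # the tool keeps the same orientation as the reference target at every vertex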
posei = poseref * rotz(ang) * transl(RADIUS, 0, 0) * rotz(-ang)
robot.MoveL(posei)
# Impose weld speed only for the first point
if i == 0:
# Set weld speed and activate the gun after reaching the first point
robot.setSpeed(SPEED_WELD)
if not DRY_RUN:
# Activate the spray right after we reached the first point
robot.RunInstruction("SprayOn", INSTRUCTION_CALL_PROGRAM)
# Stop the tool if we are not doing a dry run
if not DRY_RUN:
robot.RunInstruction("SprayOff", INSTRUCTION_CALL_PROGRAM)
robot.setSpeed(SPEED_MOVE)
# move back to the approach point, then home:
robot.MoveL(transl(0, 0, APPROACH) * poseref)
robot.MoveJ(poseref)
# Provoke program generation (automatic when RDK is finished)
RDK.Finish()
# Use tkinter to display GUI menus
#from tkinter import *
import tkinter as tk
# Generate the main window
window = tk.Tk()
# Use variables linked to the global variables
runmode = tk.IntVar()
runmode.set(RUN_MODE) # setting up default value
dryrun = tk.IntVar()
dryrun.set(DRY_RUN) # setting up default value
entry_name = tk.StringVar()
entry_name.set(PROGRAM_NAME)
entry_speed = tk.StringVar()
entry_speed.set(str(SPEED_WELD))
# Define feedback call
def ShowRunMode():
print("Selected run mode: " + str(runmode.get()))
# Define a label and entry text for the program name
tk.Label(window, text="Program name").pack(fill=tk.X, expand=0)
tk.Entry(window, textvariable=entry_name).pack(fill=tk.X, expand=0)
# Define a label and entry text for the weld speed
tk.Label(window, text="Weld speed (mm/s)").pack(fill=tk.X, expand=0)
tk.Entry(window, textvariable=entry_speed).pack(fill=tk.X, expand=0)
# Define a check box to do a dry run
tk.Checkbutton(window, text="Dry run", variable=dryrun, onvalue=1, offvalue=0, height=5, width=20).pack(fill=tk.X, expand=0)
# Add a display label for the run mode
tk.Label(window, text="Run mode", justify=tk.LEFT, padx=20).pack(fill=tk.X, expand=0)
# Set up the run modes (radio buttons)
runmodes = [("Simulate", RUNMODE_SIMULATE), ("Generate program", RUNMODE_MAKE_ROBOTPROG), ("Send program to robot", RUNMODE_MAKE_ROBOTPROG_AND_START)]
for txt, val in runmodes:
tk.Radiobutton(window, text=txt, padx=20, variable=runmode, command=ShowRunMode, value=val).pack(fill=tk.X, expand=0)
# Add a button and default action to execute the current choice of the user
def ExecuteChoice():
def run_thread():
global DRY_RUN
global RUN_MODE
global SPEED_WELD
global PROGRAM_NAME
DRY_RUN = dryrun.get()
RUN_MODE = runmode.get()
SPEED_WELD = float(entry_speed.get())
PROGRAM_NAME = entry_name.get()
# Run the main program once all the global variables have been set
try:
RunProgram()
except Exception as e:
RDK = Robolink()
msg = "Unextected program error: " + str(e)
RDK.ShowMessage(msg, False)
print(msg)
raise e # raise the error (visible if we are in console mode)
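    # Run the program in a background thread so the tkinter GUI stays responsive
    # while RoboDK simulates or generates the program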
threading.Thread(target=run_thread).start()
tk.Button(window, text='Simulate/Generate', command=ExecuteChoice, height=4).pack(fill=tk.BOTH, expand=0)
# Set window name
window_title = "Move Polygon Program"
window.title(window_title)
# We can embed the window into RoboDK as a docked window
# Make sure the window title is unique
EmbedWindow(window_title)
# Important to display the graphical user interface
window.mainloop()
|
process.py
|
import importlib
import os
import signal
import time
import subprocess
from abc import ABC, abstractmethod
from multiprocessing import Process
from setproctitle import setproctitle # pylint: disable=no-name-in-module
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.swaglog import cloudlog
from selfdrive.hardware import HARDWARE
from cereal import log
WATCHDOG_FN = "/dev/shm/wd_"
ENABLE_WATCHDOG = os.getenv("NO_WATCHDOG") is None
def launcher(proc):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# create new context since we forked
messaging.context = messaging.Context()
# exec the process
mod.main()
except KeyboardInterrupt:
cloudlog.warning("child %s got SIGINT" % proc)
except Exception:
# can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
os.execvp(pargs[0], pargs)
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.monotonic()
while time.monotonic() - t < timeout and process.exitcode is None:
time.sleep(0.001)
class ManagerProcess(ABC):
unkillable = False
daemon = False
sigkill = False
proc = None
enabled = True
name = ""
last_watchdog_time = 0
watchdog_max_dt = None
watchdog_seen = False
shutting_down = False
@abstractmethod
def prepare(self):
pass
@abstractmethod
def start(self):
pass
def restart(self):
self.stop()
self.start()
def check_watchdog(self, started):
if self.watchdog_max_dt is None or self.proc is None:
return
try:
fn = WATCHDOG_FN + str(self.proc.pid)
self.last_watchdog_time = int(open(fn).read())
except Exception:
pass
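        # The watchdog file is expected to hold a monotonic timestamp in nanoseconds
        # (hence the / 1e9 conversion below) updated by the managed process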
dt = sec_since_boot() - self.last_watchdog_time / 1e9
if dt > self.watchdog_max_dt:
# Only restart while offroad for now
if self.watchdog_seen and ENABLE_WATCHDOG:
cloudlog.error(f"Watchdog timeout for {self.name} (exitcode {self.proc.exitcode}) restarting ({started=})")
self.restart()
else:
self.watchdog_seen = True
def stop(self, retry=True, block=True):
if self.proc is None:
return
if self.proc.exitcode is None:
if not self.shutting_down:
cloudlog.info(f"killing {self.name}")
sig = signal.SIGKILL if self.sigkill else signal.SIGINT
self.signal(sig)
self.shutting_down = True
if not block:
return
join_process(self.proc, 5)
# If process failed to die send SIGKILL or reboot
if self.proc.exitcode is None and retry:
if self.unkillable:
cloudlog.critical(f"unkillable process {self.name} failed to exit! rebooting in 15 if it doesn't die")
join_process(self.proc, 15)
if self.proc.exitcode is None:
cloudlog.critical(f"unkillable process {self.name} failed to die!")
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info(f"killing {self.name} with SIGKILL")
self.signal(signal.SIGKILL)
self.proc.join()
ret = self.proc.exitcode
cloudlog.info(f"{self.name} is dead with {ret}")
if self.proc.exitcode is not None:
self.shutting_down = False
self.proc = None
return ret
def signal(self, sig):
if self.proc is None:
return
# Don't signal if already exited
if self.proc.exitcode is not None and self.proc.pid is not None:
return
cloudlog.info(f"sending signal {sig} to {self.name}")
os.kill(self.proc.pid, sig)
def get_process_state_msg(self):
state = log.ManagerState.ProcessState.new_message()
state.name = self.name
if self.proc:
state.running = self.proc.is_alive()
state.shouldBeRunning = self.proc is not None and not self.shutting_down
state.pid = self.proc.pid or 0
state.exitCode = self.proc.exitcode or 0
return state
class NativeProcess(ManagerProcess):
def __init__(self, name, cwd, cmdline, enabled=True, persistent=False, driverview=False, unkillable=False, sigkill=False, watchdog_max_dt=None):
self.name = name
self.cwd = cwd
self.cmdline = cmdline
self.enabled = enabled
self.persistent = persistent
self.driverview = driverview
self.unkillable = unkillable
self.sigkill = sigkill
self.watchdog_max_dt = watchdog_max_dt
def prepare(self):
pass
def start(self):
# In case we only tried a non blocking stop we need to stop it before restarting
if self.shutting_down:
self.stop()
if self.proc is not None:
return
cwd = os.path.join(BASEDIR, self.cwd)
cloudlog.info("starting process %s" % self.name)
self.proc = Process(name=self.name, target=nativelauncher, args=(self.cmdline, cwd))
self.proc.start()
self.watchdog_seen = False
self.shutting_down = False
class PythonProcess(ManagerProcess):
def __init__(self, name, module, enabled=True, persistent=False, driverview=False, unkillable=False, sigkill=False, watchdog_max_dt=None):
self.name = name
self.module = module
self.enabled = enabled
self.persistent = persistent
self.driverview = driverview
self.unkillable = unkillable
self.sigkill = sigkill
self.watchdog_max_dt = watchdog_max_dt
def prepare(self):
if self.enabled:
cloudlog.info("preimporting %s" % self.module)
importlib.import_module(self.module)
def start(self):
# In case we only tried a non blocking stop we need to stop it before restarting
if self.shutting_down:
self.stop()
if self.proc is not None:
return
cloudlog.info("starting python %s" % self.module)
self.proc = Process(name=self.name, target=launcher, args=(self.module,))
self.proc.start()
self.watchdog_seen = False
self.shutting_down = False
class DaemonProcess(ManagerProcess):
"""Python process that has to stay running across manager restart.
This is used for athena so you don't lose SSH access when restarting manager."""
def __init__(self, name, module, param_name, enabled=True):
self.name = name
self.module = module
self.param_name = param_name
self.enabled = enabled
self.persistent = True
def prepare(self):
pass
def start(self):
params = Params()
pid = params.get(self.param_name, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if self.module in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % self.name)
proc = subprocess.Popen(['python', '-m', self.module], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(self.param_name, str(proc.pid))
def stop(self, retry=True, block=True):
pass
def ensure_running(procs, started, driverview=False, not_run=None):
if not_run is None:
not_run = []
for p in procs:
if p.name in not_run:
p.stop(block=False)
elif not p.enabled:
p.stop(block=False)
elif p.persistent:
p.start()
elif p.driverview and driverview:
p.start()
elif started:
p.start()
else:
p.stop(block=False)
p.check_watchdog(started)
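
# A minimal sketch (not part of the original module) of how a managed process
# could update its watchdog file. check_watchdog() above reads WATCHDOG_FN + pid
# and expects a monotonic timestamp in nanoseconds; the real codebase may do this
# elsewhere with a dedicated helper.
def example_pet_watchdog():
    with open(f"{WATCHDOG_FN}{os.getpid()}", "w") as f:
        f.write(str(int(sec_since_boot() * 1e9)))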
|
bittrexticker.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys, traceback
import threading
import time
import simplejson as json
import urllib2
from PyQt4 import QtGui,QtCore
from boardlet import Boardlet
from modellet import Modellet
class BittrexTicker(Boardlet):
def __init__(self, parent, targetCurr):
super(BittrexTicker, self).__init__(parent)
self.p_model = Bittrex( targetCurr )
self.initUI()
def initUI(self):
super(BittrexTicker, self).initUI()
self.p_icon = QtGui.QLabel(self)
self.p_icon.setGeometry( self.b_imgx(), self.b_imgy(),
self.b_iconwidth(),self.b_iconheight() )
self.p_icon.setPixmap( QtGui.QPixmap(os.getcwd() + "/img/xmr.png" ) )
t = threading.Thread(target=self.periodicUpdate)
t.setDaemon(True)
t.start()
def paintEvent(self, e):
super(BittrexTicker, self).paintEvent(e)
qp = QtGui.QPainter()
qp.begin(self)
qp.setPen( self.p_grayPen )
qp.setFont( self.p_pairFont )
qp.drawText( self.b_col1x(), self.b_row1y(),
'Bittrex BTC' + self.p_model.getTargetCurr() )
qp.setPen( self.p_whitePen )
qp.setFont( self.p_normFont )
qp.drawText( self.b_col1x(), self.b_row2y() - 5,
'bid: ' + self.p_model.getBestBid() )
qp.drawText( self.b_col1x(), self.b_row3y() - 5,
'ask: ' + self.p_model.getBestAsk() )
qp.setFont( self.p_timeFont )
qp.setPen( self.p_grayPen )
qp.drawText( self.b_imgx(), self.b_row4y(),
'Refreshed: ' + self.p_model.getLastUpdated() )
qp.end()
def periodicUpdate(self):
while(True):
st = self.getNextWaitTimeSeconds()
time.sleep( st )
self.p_model.doRefresh()
class Bittrex(Modellet):
def __init__(self, targetCurr='XMR'):
self.p_targetCurr = targetCurr
self.p_refreshTime = None
self.p_bestBid = '000.00'
self.p_bestAsk = '000.00'
def getBestBid(self):
return self.p_bestBid
def getBestAsk(self):
return self.p_bestAsk
def getTargetCurr(self):
return self.p_targetCurr
def doRefresh(self):
headers = {'User-agent' : 'Mozilla/5.0'}
        req = urllib2.Request( 'https://bittrex.com/api/v1.1/public/getticker?market=BTC-' + self.p_targetCurr, None, headers )
try:
resp = urllib2.urlopen(req).read()
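            # Assumed response shape for the v1.1 getticker endpoint:
            # {"success": true, "result": {"Bid": ..., "Ask": ..., "Last": ...}}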
self.p_bestBid = str( json.loads(resp)['result']['Bid'] )
self.p_bestAsk = str( json.loads(resp)['result']['Ask'] )
super(Bittrex, self).setFaultFlag(False)
super(Bittrex, self).setLastUpdatedNow()
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print ''.join('!! ' + line for line in lines)
super(Bittrex, self).setFaultFlag(True)
|
delegate.py
|
import time
from abc import ABC
from multiprocessing.connection import Client, Listener
from threading import Thread
from typing import Any, Optional
__all__ = [
'Delegate',
'Emitter',
'check_event', 'on_event'
]
class Delegate(ABC):
def check_event(self, event: str, data: Any = None) -> int:
pass
def on_event(self, event: str, data: Any = None):
pass
class Emitter:
def __init__(self):
self.delegate = None
def connect(self, delegate: Delegate):
self.delegate = delegate
def check_event(self, event: str, data: Any = None) -> int:
return check_event(self.delegate, event, data)
def on_event(self, event: str, data: Any = None):
on_event(self.delegate, event, data)
def check_event(delegate: Optional[Delegate], event: str, data: Any = None) -> int:
if delegate is not None:
return delegate.check_event(event, data)
return False
def on_event(delegate: Optional[Delegate], event: str, data: Any = None):
if delegate is not None:
delegate.on_event(event, data)
class SocketDelegate(Delegate):
def __init__(self):
self._client = None
self._queue = []
self._poll_interval = 0.001
self._update_interval = 0.001
self._time_since_last_update = 0
def connect(self, port: int):
self._client = Client(('localhost', port))
Thread(target=self._run, daemon=True).start()
def on_custom_event(self, code: str, event: str, data: Any) -> Any:
pass
def _run(self):
while True:
time.sleep(self._poll_interval)
self._time_since_last_update += self._poll_interval
self._read_events()
if self._time_since_last_update >= self._update_interval:
self._time_since_last_update = 0
self._update()
def _read_events(self):
while self._client.poll():
self._queue.append(self._client.recv())
def _update(self):
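        # Each queued item is a (type, event, data) tuple: 'c' expects a reply
        # to check_event, 'e' is a fire-and-forget event, and any other type is
        # routed to on_custom_event (replying only if it returns a value)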
for t, event, data in self._queue:
if t == 'c':
self._client.send(self.check_event(event, data))
elif t == 'e':
self.on_event(event, data)
else:
result = self.on_custom_event(t, event, data)
if result is not None:
self._client.send(result)
self._queue.clear()
class SocketEmitter(Emitter):
def __init__(self):
super().__init__()
self._conn = None
self._server = None
def connect(self, port: int):
self._server = Listener(('localhost', port))
Thread(target=self._connect, daemon=True).start()
def wait_for_client(self):
while self._conn is None:
time.sleep(0.001)
def check_event(self, event: str, data: Any = None) -> int:
self._conn.send(('c', event, data))
return self._conn.recv()
def on_event(self, event: str, data: Any = None):
self._conn.send(('e', event, data))
def _connect(self):
self._conn = self._server.accept()
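
# Minimal usage sketch (not part of the original module): wire a SocketEmitter
# to a SocketDelegate over localhost. The port number and the PrintingDelegate
# class are illustrative only.
if __name__ == '__main__':
    class PrintingDelegate(SocketDelegate):
        def check_event(self, event, data=None):
            return 1

        def on_event(self, event, data=None):
            print('received', event, data)

    port = 6000  # assumed to be free
    emitter = SocketEmitter()
    emitter.connect(port)          # listener thread waits for the delegate
    delegate = PrintingDelegate()
    delegate.connect(port)         # connects and starts polling in a thread
    emitter.wait_for_client()
    emitter.on_event('ping', {'x': 1})    # no reply expected
    print(emitter.check_event('ready'))   # round trip, prints 1
    time.sleep(0.1)                       # let the delegate drain its queue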
|
hub.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import select
import socket
import threading
import time
import uuid
import warnings
import queue
import xmlrpc.client as xmlrpc
from urllib.parse import urlunparse
from .. import log
from .constants import SAMP_STATUS_OK
from .constants import __profile_version__
from .errors import SAMPWarning, SAMPHubError, SAMPProxyError
from .utils import internet_on, ServerProxyPool, _HubAsClient
from .lockfile_helpers import read_lockfile, create_lock_file
from .standard_profile import ThreadingXMLRPCServer
from .web_profile import WebProfileXMLRPCServer, web_profile_text_dialog
__all__ = ['SAMPHubServer', 'WebProfileDialog']
__doctest_skip__ = ['.', 'SAMPHubServer.*']
class SAMPHubServer:
"""
SAMP Hub Server.
Parameters
----------
secret : str, optional
        The secret code to use for the SAMP lockfile. If none is specified,
the :func:`uuid.uuid1` function is used to generate one.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
lockfile : str, optional
Custom lockfile name.
timeout : int, optional
Hub inactivity timeout. If ``timeout > 0`` then the Hub automatically
stops after an inactivity period longer than ``timeout`` seconds. By
default ``timeout`` is set to 0 (Hub never expires).
client_timeout : int, optional
Client inactivity timeout. If ``client_timeout > 0`` then the Hub
        automatically unregisters clients that have been inactive for a
period longer than ``client_timeout`` seconds. By default
``client_timeout`` is set to 0 (clients never expire).
mode : str, optional
Defines the Hub running mode. If ``mode`` is ``'single'`` then the Hub
        runs using the standard ``.samp`` lock-file, with a single instance
        per user desktop session. Otherwise, if ``mode`` is ``'multiple'``,
then the Hub runs using a non-standard lock-file, placed in
``.samp-1`` directory, of the form ``samp-hub-<UUID>``, where
``<UUID>`` is a unique UUID assigned to the hub.
label : str, optional
A string used to label the Hub with a human readable name. This string
is written in the lock-file assigned to the ``hub.label`` token.
web_profile : bool, optional
Enables or disables the Web Profile support.
web_profile_dialog : class, optional
Allows a class instance to be specified using ``web_profile_dialog``
to replace the terminal-based message with e.g. a GUI pop-up. Two
`queue.Queue` instances will be added to the instance as attributes
``queue_request`` and ``queue_result``. When a request is received via
the ``queue_request`` queue, the pop-up should be displayed, and a
value of `True` or `False` should be added to ``queue_result``
depending on whether the user accepted or refused the connection.
web_port : int, optional
The port to use for web SAMP. This should not be changed except for
testing purposes, since web SAMP should always use port 21012.
pool_size : int, optional
The number of socket connections opened to communicate with the
clients.
"""
def __init__(self, secret=None, addr=None, port=0, lockfile=None,
timeout=0, client_timeout=0, mode='single', label="",
web_profile=True, web_profile_dialog=None, web_port=21012,
pool_size=20):
# Generate random ID for the hub
self._id = str(uuid.uuid1())
# General settings
self._is_running = False
self._customlockfilename = lockfile
self._lockfile = None
self._addr = addr
self._port = port
self._mode = mode
self._label = label
self._timeout = timeout
self._client_timeout = client_timeout
self._pool_size = pool_size
# Web profile specific attributes
self._web_profile = web_profile
self._web_profile_dialog = web_profile_dialog
self._web_port = web_port
self._web_profile_server = None
self._web_profile_callbacks = {}
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name,
self._port or 0)
except socket.error:
self._host_name = "127.0.0.1"
# Threading stuff
self._thread_lock = threading.Lock()
self._thread_run = None
self._thread_hub_timeout = None
self._thread_client_timeout = None
self._launched_threads = []
# Variables for timeout testing:
self._last_activity_time = None
self._client_activity_time = {}
# Hub message id counter, used to create hub msg ids
self._hub_msg_id_counter = 0
# Hub secret code
self._hub_secret_code_customized = secret
self._hub_secret = self._create_secret_code()
# Hub public id (as SAMP client)
self._hub_public_id = ""
# Client ids
# {private_key: (public_id, timestamp)}
self._private_keys = {}
# Metadata per client
# {private_key: metadata}
self._metadata = {}
# List of subscribed clients per MType
# {mtype: private_key list}
self._mtype2ids = {}
# List of subscribed MTypes per client
# {private_key: mtype list}
self._id2mtypes = {}
# List of XML-RPC addresses per client
# {public_id: (XML-RPC address, ServerProxyPool instance)}
self._xmlrpc_endpoints = {}
# Synchronous message id heap
self._sync_msg_ids_heap = {}
# Public ids counter
self._client_id_counter = -1
@property
def id(self):
"""
The unique hub ID.
"""
return self._id
def _register_standard_api(self, server):
# Standard Profile only operations
server.register_function(self._ping, 'samp.hub.ping')
server.register_function(self._set_xmlrpc_callback, 'samp.hub.setXmlrpcCallback')
# Standard API operations
server.register_function(self._register, 'samp.hub.register')
server.register_function(self._unregister, 'samp.hub.unregister')
server.register_function(self._declare_metadata, 'samp.hub.declareMetadata')
server.register_function(self._get_metadata, 'samp.hub.getMetadata')
server.register_function(self._declare_subscriptions, 'samp.hub.declareSubscriptions')
server.register_function(self._get_subscriptions, 'samp.hub.getSubscriptions')
server.register_function(self._get_registered_clients, 'samp.hub.getRegisteredClients')
server.register_function(self._get_subscribed_clients, 'samp.hub.getSubscribedClients')
server.register_function(self._notify, 'samp.hub.notify')
server.register_function(self._notify_all, 'samp.hub.notifyAll')
server.register_function(self._call, 'samp.hub.call')
server.register_function(self._call_all, 'samp.hub.callAll')
server.register_function(self._call_and_wait, 'samp.hub.callAndWait')
server.register_function(self._reply, 'samp.hub.reply')
def _register_web_profile_api(self, server):
# Web Profile methods like Standard Profile
server.register_function(self._ping, 'samp.webhub.ping')
server.register_function(self._unregister, 'samp.webhub.unregister')
server.register_function(self._declare_metadata, 'samp.webhub.declareMetadata')
server.register_function(self._get_metadata, 'samp.webhub.getMetadata')
server.register_function(self._declare_subscriptions, 'samp.webhub.declareSubscriptions')
server.register_function(self._get_subscriptions, 'samp.webhub.getSubscriptions')
server.register_function(self._get_registered_clients, 'samp.webhub.getRegisteredClients')
server.register_function(self._get_subscribed_clients, 'samp.webhub.getSubscribedClients')
server.register_function(self._notify, 'samp.webhub.notify')
server.register_function(self._notify_all, 'samp.webhub.notifyAll')
server.register_function(self._call, 'samp.webhub.call')
server.register_function(self._call_all, 'samp.webhub.callAll')
server.register_function(self._call_and_wait, 'samp.webhub.callAndWait')
server.register_function(self._reply, 'samp.webhub.reply')
# Methods particularly for Web Profile
server.register_function(self._web_profile_register, 'samp.webhub.register')
server.register_function(self._web_profile_allowReverseCallbacks, 'samp.webhub.allowReverseCallbacks')
server.register_function(self._web_profile_pullCallbacks, 'samp.webhub.pullCallbacks')
def _start_standard_server(self):
self._server = ThreadingXMLRPCServer(
(self._addr or self._host_name, self._port or 0),
log, logRequests=False, allow_none=True)
prot = 'http'
self._port = self._server.socket.getsockname()[1]
addr = "{0}:{1}".format(self._addr or self._host_name, self._port)
self._url = urlunparse((prot, addr, '', '', '', ''))
self._server.register_introspection_functions()
self._register_standard_api(self._server)
def _start_web_profile_server(self):
self._web_profile_requests_queue = queue.Queue(1)
self._web_profile_requests_result = queue.Queue(1)
self._web_profile_requests_semaphore = queue.Queue(1)
if self._web_profile_dialog is not None:
# TODO: Some sort of duck-typing on the web_profile_dialog object
self._web_profile_dialog.queue_request = \
self._web_profile_requests_queue
self._web_profile_dialog.queue_result = \
self._web_profile_requests_result
try:
self._web_profile_server = WebProfileXMLRPCServer(
('localhost', self._web_port), log, logRequests=False,
allow_none=True)
self._web_port = self._web_profile_server.socket.getsockname()[1]
self._web_profile_server.register_introspection_functions()
self._register_web_profile_api(self._web_profile_server)
log.info("Hub set to run with Web Profile support enabled.")
except socket.error:
log.warning("Port {0} already in use. Impossible to run the "
"Hub with Web Profile support.".format(self._web_port),
SAMPWarning)
self._web_profile = False
# Cleanup
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
def _launch_thread(self, group=None, target=None, name=None, args=None):
# Remove inactive threads
remove = []
for t in self._launched_threads:
if not t.is_alive():
remove.append(t)
for t in remove:
self._launched_threads.remove(t)
# Start new thread
t = threading.Thread(group=group, target=target, name=name, args=args)
t.start()
# Add to list of launched threads
self._launched_threads.append(t)
def _join_launched_threads(self, timeout=None):
for t in self._launched_threads:
t.join(timeout=timeout)
def _timeout_test_hub(self):
if self._timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.:
with self._thread_lock:
if self._last_activity_time is not None:
if now - self._last_activity_time >= self._timeout:
warnings.warn("Timeout expired, Hub is shutting down!",
SAMPWarning)
self.stop()
return
last = now
def _timeout_test_client(self):
if self._client_timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.:
                # iterate over a copy: _unregister() removes entries from this dict
                for private_key in list(self._client_activity_time.keys()):
if (now - self._client_activity_time[private_key] > self._client_timeout
and private_key != self._hub_private_key):
warnings.warn(
"Client {} timeout expired!".format(private_key),
SAMPWarning)
self._notify_disconnection(private_key)
self._unregister(private_key)
last = now
def _hub_as_client_request_handler(self, method, args):
if method == 'samp.client.receiveCall':
return self._receive_call(*args)
elif method == 'samp.client.receiveNotification':
return self._receive_notification(*args)
elif method == 'samp.client.receiveResponse':
return self._receive_response(*args)
elif method == 'samp.app.ping':
return self._ping(*args)
def _setup_hub_as_client(self):
hub_metadata = {"samp.name": "Astropy SAMP Hub",
"samp.description.text": self._label,
"author.name": "The Astropy Collaboration",
"samp.documentation.url": "http://docs.astropy.org/en/stable/samp",
"samp.icon.url": self._url + "/samp/icon"}
result = self._register(self._hub_secret)
self._hub_public_id = result["samp.self-id"]
self._hub_private_key = result["samp.private-key"]
self._set_xmlrpc_callback(self._hub_private_key, self._url)
self._declare_metadata(self._hub_private_key, hub_metadata)
self._declare_subscriptions(self._hub_private_key,
{"samp.app.ping": {},
"x-samp.query.by-meta": {}})
def start(self, wait=False):
"""
Start the current SAMP Hub instance and create the lock file. Hub
start-up can be blocking or non blocking depending on the ``wait``
parameter.
Parameters
----------
wait : bool
If `True` then the Hub process is joined with the caller, blocking
the code flow. Usually `True` option is used to run a stand-alone
Hub in an executable script. If `False` (default), then the Hub
process runs in a separated thread. `False` is usually used in a
Python shell.
"""
if self._is_running:
raise SAMPHubError("Hub is already running")
if self._lockfile is not None:
raise SAMPHubError("Hub is not running but lockfile is set")
if self._web_profile:
self._start_web_profile_server()
self._start_standard_server()
self._lockfile = create_lock_file(lockfilename=self._customlockfilename,
mode=self._mode, hub_id=self.id,
hub_params=self.params)
self._update_last_activity_time()
self._setup_hub_as_client()
self._start_threads()
log.info("Hub started")
if wait and self._is_running:
self._thread_run.join()
self._thread_run = None
@property
def params(self):
"""
        The hub parameters (which are written to the lock file)
"""
params = {}
# Keys required by standard profile
params['samp.secret'] = self._hub_secret
params['samp.hub.xmlrpc.url'] = self._url
params['samp.profile.version'] = __profile_version__
# Custom keys
params['hub.id'] = self.id
params['hub.label'] = self._label or "Hub {0}".format(self.id)
return params
def _start_threads(self):
self._thread_run = threading.Thread(target=self._serve_forever)
self._thread_run.daemon = True
if self._timeout > 0:
self._thread_hub_timeout = threading.Thread(
target=self._timeout_test_hub,
name="Hub timeout test")
self._thread_hub_timeout.daemon = True
else:
self._thread_hub_timeout = None
if self._client_timeout > 0:
self._thread_client_timeout = threading.Thread(
target=self._timeout_test_client,
name="Client timeout test")
self._thread_client_timeout.daemon = True
else:
self._thread_client_timeout = None
self._is_running = True
self._thread_run.start()
if self._thread_hub_timeout is not None:
self._thread_hub_timeout.start()
if self._thread_client_timeout is not None:
self._thread_client_timeout.start()
def _create_secret_code(self):
if self._hub_secret_code_customized is not None:
return self._hub_secret_code_customized
else:
return str(uuid.uuid1())
def stop(self):
"""
Stop the current SAMP Hub instance and delete the lock file.
"""
if not self._is_running:
return
log.info("Hub is stopping...")
self._notify_shutdown()
self._is_running = False
if self._lockfile and os.path.isfile(self._lockfile):
lockfiledict = read_lockfile(self._lockfile)
if lockfiledict['samp.secret'] == self._hub_secret:
os.remove(self._lockfile)
self._lockfile = None
# Reset variables
# TODO: What happens if not all threads are stopped after timeout?
self._join_all_threads(timeout=10.)
self._hub_msg_id_counter = 0
self._hub_secret = self._create_secret_code()
self._hub_public_id = ""
self._metadata = {}
self._private_keys = {}
self._mtype2ids = {}
self._id2mtypes = {}
self._xmlrpc_endpoints = {}
self._last_activity_time = None
log.info("Hub stopped.")
def _join_all_threads(self, timeout=None):
# In some cases, ``stop`` may be called from some of the sub-threads,
# so we just need to make sure that we don't try and shut down the
# calling thread.
current_thread = threading.current_thread()
if self._thread_run is not current_thread:
self._thread_run.join(timeout=timeout)
if not self._thread_run.is_alive():
self._thread_run = None
if self._thread_hub_timeout is not None and self._thread_hub_timeout is not current_thread:
self._thread_hub_timeout.join(timeout=timeout)
if not self._thread_hub_timeout.is_alive():
self._thread_hub_timeout = None
if self._thread_client_timeout is not None and self._thread_client_timeout is not current_thread:
self._thread_client_timeout.join(timeout=timeout)
if not self._thread_client_timeout.is_alive():
self._thread_client_timeout = None
self._join_launched_threads(timeout=timeout)
@property
def is_running(self):
"""Return an information concerning the Hub running status.
Returns
-------
running : bool
Is the hub running?
"""
return self._is_running
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self._server.socket], [], [], 0.01)[0]
except OSError as exc:
warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self._server.handle_request()
if self._web_profile:
# We now check if there are any connection requests from the
# web profile, and if so, we initialize the pop-up.
if self._web_profile_dialog is None:
try:
request = self._web_profile_requests_queue.get_nowait()
except queue.Empty:
pass
else:
web_profile_text_dialog(request, self._web_profile_requests_result)
# We now check for requests over the web profile socket, and we
# also update the pop-up in case there are any changes.
try:
read_ready = select.select([self._web_profile_server.socket], [], [], 0.01)[0]
except OSError as exc:
warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self._web_profile_server.handle_request()
self._server.server_close()
if self._web_profile_server is not None:
self._web_profile_server.server_close()
def _notify_shutdown(self):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown")
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
self._notify_(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.shutdown",
"samp.params": {}})
def _notify_register(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.register",
"samp.params": {"id": public_id}})
def _notify_unregister(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.unregister",
"samp.params": {"id": public_id}})
def _notify_metadata(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.metadata",
"samp.params": {"id": public_id,
"metadata": self._metadata[private_key]}
})
def _notify_subscriptions(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.subscriptions",
"samp.params": {"id": public_id,
"subscriptions": self._id2mtypes[private_key]}
})
def _notify_disconnection(self, private_key):
def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message):
endpoint.samp.client.receiveNotification(private_key, hub_public_id, message)
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect")
public_id = self._private_keys[private_key][0]
endpoint = self._xmlrpc_endpoints[public_id][1]
for mtype in msubs:
if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]:
log.debug("notify disconnection to {}".format(public_id))
self._launch_thread(target=_xmlrpc_call_disconnect,
args=(endpoint, private_key,
self._hub_public_id,
{"samp.mtype": "samp.hub.disconnect",
"samp.params": {"reason": "Timeout expired!"}}))
def _ping(self):
self._update_last_activity_time()
log.debug("ping")
return "1"
def _query_by_metadata(self, key, value):
public_id_list = []
for private_id in self._metadata:
if key in self._metadata[private_id]:
if self._metadata[private_id][key] == value:
public_id_list.append(self._private_keys[private_id][0])
return public_id_list
def _set_xmlrpc_callback(self, private_key, xmlrpc_addr):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if private_key == self._hub_private_key:
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = \
(xmlrpc_addr, _HubAsClient(self._hub_as_client_request_handler))
return ""
# Dictionary stored with the public id
log.debug("set_xmlrpc_callback: {} {}".format(private_key,
xmlrpc_addr))
server_proxy_pool = None
server_proxy_pool = ServerProxyPool(self._pool_size,
xmlrpc.ServerProxy,
xmlrpc_addr, allow_none=1)
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = (xmlrpc_addr,
server_proxy_pool)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _perform_standard_register(self):
with self._thread_lock:
private_key, public_id = self._get_new_ids()
self._private_keys[private_key] = (public_id, time.time())
self._update_last_activity_time(private_key)
self._notify_register(private_key)
log.debug("register: private-key = {} and self-id = {}"
.format(private_key, public_id))
return {"samp.self-id": public_id,
"samp.private-key": private_key,
"samp.hub-id": self._hub_public_id}
def _register(self, secret):
self._update_last_activity_time()
if secret == self._hub_secret:
return self._perform_standard_register()
else:
# return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""}
raise SAMPProxyError(7, "Bad secret code")
def _get_new_ids(self):
private_key = str(uuid.uuid1())
self._client_id_counter += 1
public_id = 'cli#hub'
if self._client_id_counter > 0:
public_id = "cli#{}".format(self._client_id_counter)
return private_key, public_id
def _unregister(self, private_key):
self._update_last_activity_time()
public_key = ""
self._notify_unregister(private_key)
with self._thread_lock:
if private_key in self._private_keys:
public_key = self._private_keys[private_key][0]
del self._private_keys[private_key]
else:
return ""
if private_key in self._metadata:
del self._metadata[private_key]
if private_key in self._id2mtypes:
del self._id2mtypes[private_key]
for mtype in self._mtype2ids.keys():
if private_key in self._mtype2ids[mtype]:
self._mtype2ids[mtype].remove(private_key)
if public_key in self._xmlrpc_endpoints:
del self._xmlrpc_endpoints[public_key]
if private_key in self._client_activity_time:
del self._client_activity_time[private_key]
if self._web_profile:
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
self._web_profile_server.remove_client(private_key)
log.debug("unregister {} ({})".format(public_key, private_key))
return ""
def _declare_metadata(self, private_key, metadata):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug("declare_metadata: private-key = {} metadata = {}"
.format(private_key, str(metadata)))
self._metadata[private_key] = metadata
self._notify_metadata(private_key)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _get_metadata(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
log.debug("get_metadata: private-key = {} client-id = {}"
.format(private_key, client_id))
if client_private_key is not None:
if client_private_key in self._metadata:
log.debug("--> metadata = {}"
.format(self._metadata[client_private_key]))
return self._metadata[client_private_key]
else:
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _declare_subscriptions(self, private_key, mtypes):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug("declare_subscriptions: private-key = {} mtypes = {}"
.format(private_key, str(mtypes)))
# remove subscription to previous mtypes
if private_key in self._id2mtypes:
prev_mtypes = self._id2mtypes[private_key]
for mtype in prev_mtypes:
try:
self._mtype2ids[mtype].remove(private_key)
except ValueError: # private_key is not in list
pass
self._id2mtypes[private_key] = copy.deepcopy(mtypes)
# remove duplicated MType for wildcard overwriting
original_mtypes = copy.deepcopy(mtypes)
for mtype in original_mtypes:
if mtype.endswith("*"):
for mtype2 in original_mtypes:
if mtype2.startswith(mtype[:-1]) and \
mtype2 != mtype:
if mtype2 in mtypes:
del(mtypes[mtype2])
log.debug("declare_subscriptions: subscriptions accepted from "
"{} => {}".format(private_key, str(mtypes)))
for mtype in mtypes:
if mtype in self._mtype2ids:
if private_key not in self._mtype2ids[mtype]:
self._mtype2ids[mtype].append(private_key)
else:
self._mtype2ids[mtype] = [private_key]
self._notify_subscriptions(private_key)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _get_subscriptions(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
if client_private_key is not None:
if client_private_key in self._id2mtypes:
log.debug("get_subscriptions: client-id = {} mtypes = {}"
.format(client_id,
str(self._id2mtypes[client_private_key])))
return self._id2mtypes[client_private_key]
else:
log.debug("get_subscriptions: client-id = {} mtypes = "
"missing".format(client_id))
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _get_registered_clients(self, private_key):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
reg_clients = []
for pkey in self._private_keys.keys():
if pkey != private_key:
reg_clients.append(self._private_keys[pkey][0])
log.debug("get_registered_clients: private_key = {} clients = {}"
.format(private_key, reg_clients))
return reg_clients
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _get_subscribed_clients(self, private_key, mtype):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
sub_clients = {}
for pkey in self._private_keys.keys():
if pkey != private_key and self._is_subscribed(pkey, mtype):
sub_clients[self._private_keys[pkey][0]] = {}
log.debug("get_subscribed_clients: private_key = {} mtype = {} "
"clients = {}".format(private_key, mtype, sub_clients))
return sub_clients
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
@staticmethod
def get_mtype_subtypes(mtype):
"""
Return a list containing all the possible wildcarded subtypes of MType.
Parameters
----------
mtype : str
MType to be parsed.
Returns
-------
types : list
List of subtypes
Examples
--------
>>> from astropy.samp import SAMPHubServer
>>> SAMPHubServer.get_mtype_subtypes("samp.app.ping")
['samp.app.ping', 'samp.app.*', 'samp.*', '*']
"""
subtypes = []
msubs = mtype.split(".")
indexes = list(range(len(msubs)))
indexes.reverse()
indexes.append(-1)
for i in indexes:
tmp_mtype = ".".join(msubs[:i + 1])
if tmp_mtype != mtype:
if tmp_mtype != "":
tmp_mtype = tmp_mtype + ".*"
else:
tmp_mtype = "*"
subtypes.append(tmp_mtype)
return subtypes
def _is_subscribed(self, private_key, mtype):
subscribed = False
msubs = SAMPHubServer.get_mtype_subtypes(mtype)
for msub in msubs:
if msub in self._mtype2ids:
if private_key in self._mtype2ids[msub]:
subscribed = True
return subscribed
def _notify(self, private_key, recipient_id, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if self._is_subscribed(self._public_id_to_private_key(recipient_id),
message["samp.mtype"]) is False:
raise SAMPProxyError(2, "Client {} not subscribed to MType {}"
.format(recipient_id, message["samp.mtype"]))
self._launch_thread(target=self._notify_, args=(private_key,
recipient_id,
message))
return {}
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _notify_(self, sender_private_key, recipient_public_id, message):
if sender_private_key not in self._private_keys:
return
sender_public_id = self._private_keys[sender_private_key][0]
try:
log.debug("notify {} from {} to {}".format(
message["samp.mtype"], sender_public_id,
recipient_public_id))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, message)
samp_method_name = "receiveNotification"
self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} notification from client {} to client {} "
"failed [{}]".format(message["samp.mtype"],
sender_public_id,
recipient_public_id, exc),
SAMPWarning)
def _notify_all(self, private_key, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing")
recipient_ids = self._notify_all_(private_key, message)
return recipient_ids
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _notify_all_(self, sender_private_key, message):
recipient_ids = []
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_recipient_id = self._private_keys[key][0]
recipient_ids.append(_recipient_id)
self._launch_thread(target=self._notify,
args=(sender_private_key,
_recipient_id, message)
)
return recipient_ids
def _call(self, private_key, recipient_id, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if self._is_subscribed(self._public_id_to_private_key(recipient_id),
message["samp.mtype"]) is False:
raise SAMPProxyError(2, "Client {} not subscribed to MType {}"
.format(recipient_id, message["samp.mtype"]))
public_id = self._private_keys[private_key][0]
msg_id = self._get_new_hub_msg_id(public_id, msg_tag)
self._launch_thread(target=self._call_, args=(private_key, public_id,
recipient_id, msg_id,
message))
return msg_id
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _call_(self, sender_private_key, sender_public_id,
recipient_public_id, msg_id, message):
if sender_private_key not in self._private_keys:
return
try:
log.debug("call {} from {} to {} ({})".format(
msg_id.split(";;")[0], sender_public_id,
recipient_public_id, message["samp.mtype"]))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, msg_id, message)
samp_methodName = "receiveCall"
self._retry_method(recipient_private_key, recipient_public_id, samp_methodName, arg_params)
except Exception as exc:
warnings.warn("{} call {} from client {} to client {} failed "
"[{},{}]".format(message["samp.mtype"],
msg_id.split(";;")[0],
sender_public_id,
recipient_public_id, type(exc), exc),
SAMPWarning)
def _call_all(self, private_key, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing in "
"message tagged as {}".format(msg_tag))
public_id = self._private_keys[private_key][0]
msg_id = self._call_all_(private_key, public_id, msg_tag, message)
return msg_id
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _call_all_(self, sender_private_key, sender_public_id, msg_tag,
message):
msg_id = {}
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_msg_id = self._get_new_hub_msg_id(sender_public_id,
msg_tag)
receiver_public_id = self._private_keys[key][0]
msg_id[receiver_public_id] = _msg_id
self._launch_thread(target=self._call_,
args=(sender_private_key,
sender_public_id,
receiver_public_id, _msg_id,
message))
return msg_id
def _call_and_wait(self, private_key, recipient_id, message, timeout):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
timeout = int(timeout)
now = time.time()
response = {}
msg_id = self._call(private_key, recipient_id, "samp::sync::call",
message)
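            # Reserve a slot for the response; _reply_ fills it in when the
            # recipient answers and the polling loop below picks it up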
self._sync_msg_ids_heap[msg_id] = None
while self._is_running:
if 0 < timeout <= time.time() - now:
del(self._sync_msg_ids_heap[msg_id])
raise SAMPProxyError(1, "Timeout expired!")
if self._sync_msg_ids_heap[msg_id] is not None:
response = copy.deepcopy(self._sync_msg_ids_heap[msg_id])
del(self._sync_msg_ids_heap[msg_id])
break
time.sleep(0.01)
return response
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _reply(self, private_key, msg_id, response):
"""
The main method that gets called for replying. This starts up an
asynchronous reply thread and returns.
"""
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
self._launch_thread(target=self._reply_, args=(private_key, msg_id,
response))
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return {}
def _reply_(self, responder_private_key, msg_id, response):
if responder_private_key not in self._private_keys or not msg_id:
return
responder_public_id = self._private_keys[responder_private_key][0]
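        # msg_id was built by _get_new_hub_msg_id as
        # "msg#<counter>;;<hub public id>;;<caller public id>;;<caller msg tag>"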
counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split(";;", 3)
try:
log.debug("reply {} from {} to {}".format(
counter, responder_public_id, recipient_public_id))
if recipient_msg_tag == "samp::sync::call":
if msg_id in self._sync_msg_ids_heap.keys():
self._sync_msg_ids_heap[msg_id] = response
else:
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (responder_public_id, recipient_msg_tag, response)
samp_method_name = "receiveResponse"
self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} reply from client {} to client {} failed [{}]"
.format(recipient_msg_tag, responder_public_id,
recipient_public_id, exc),
SAMPWarning)
def _retry_method(self, recipient_private_key, recipient_public_id, samp_method_name, arg_params):
"""
This method is used to retry a SAMP call several times.
Parameters
----------
recipient_private_key
The private key of the receiver of the call
        recipient_public_id
            The public ID of the receiver of the call
samp_method_name : str
The name of the SAMP method to call
arg_params : tuple
Any additional arguments to be passed to the SAMP method
"""
if recipient_private_key is None:
raise SAMPHubError("Invalid client ID")
from . import conf
for attempt in range(conf.n_retries):
if not self._is_running:
time.sleep(0.01)
continue
try:
if (self._web_profile and
recipient_private_key in self._web_profile_callbacks):
# Web Profile
callback = {"samp.methodName": samp_method_name,
"samp.params": arg_params}
self._web_profile_callbacks[recipient_private_key].put(callback)
else:
# Standard Profile
hub = self._xmlrpc_endpoints[recipient_public_id][1]
getattr(hub.samp.client, samp_method_name)(recipient_private_key, *arg_params)
except xmlrpc.Fault as exc:
log.debug("{} XML-RPC endpoint error (attempt {}): {}"
.format(recipient_public_id, attempt + 1,
exc.faultString))
time.sleep(0.01)
else:
return
# If we are here, then the above attempts failed
        error_message = (samp_method_name + " failed after "
                         + str(conf.n_retries) + " attempts")
raise SAMPHubError(error_message)
def _public_id_to_private_key(self, public_id):
for private_key in self._private_keys.keys():
if self._private_keys[private_key][0] == public_id:
return private_key
return None
def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id):
with self._thread_lock:
self._hub_msg_id_counter += 1
return "msg#{};;{};;{};;{}".format(self._hub_msg_id_counter,
self._hub_public_id,
sender_public_id, sender_msg_id)
def _update_last_activity_time(self, private_key=None):
with self._thread_lock:
self._last_activity_time = time.time()
if private_key is not None:
self._client_activity_time[private_key] = time.time()
def _receive_notification(self, private_key, sender_id, message):
return ""
def _receive_call(self, private_key, sender_id, msg_id, message):
if private_key == self._hub_private_key:
if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping":
self._reply(self._hub_private_key, msg_id,
{"samp.status": SAMP_STATUS_OK, "samp.result": {}})
elif ("samp.mtype" in message and
(message["samp.mtype"] == "x-samp.query.by-meta" or
message["samp.mtype"] == "samp.query.by-meta")):
ids_list = self._query_by_metadata(message["samp.params"]["key"],
message["samp.params"]["value"])
self._reply(self._hub_private_key, msg_id,
{"samp.status": SAMP_STATUS_OK,
"samp.result": {"ids": ids_list}})
return ""
else:
return ""
def _receive_response(self, private_key, responder_id, msg_tag, response):
return ""
def _web_profile_register(self, identity_info,
client_address=("unknown", 0),
origin="unknown"):
self._update_last_activity_time()
if not client_address[0] in ["localhost", "127.0.0.1"]:
raise SAMPProxyError(403, "Request of registration rejected "
"by the Hub.")
if not origin:
origin = "unknown"
if isinstance(identity_info, dict):
# an old version of the protocol provided just a string with the app name
if "samp.name" not in identity_info:
raise SAMPProxyError(403, "Request of registration rejected "
"by the Hub (application name not "
"provided).")
# Red semaphore for the other threads
self._web_profile_requests_semaphore.put("wait")
# Set the request to be displayed for the current thread
self._web_profile_requests_queue.put((identity_info, client_address,
origin))
# Get the popup dialogue response
response = self._web_profile_requests_result.get()
# OK, semaphore green
self._web_profile_requests_semaphore.get()
if response:
register_map = self._perform_standard_register()
translator_url = ("http://localhost:{}/translator/{}?ref="
.format(self._web_port, register_map["samp.private-key"]))
register_map["samp.url-translator"] = translator_url
self._web_profile_server.add_client(register_map["samp.private-key"])
return register_map
else:
raise SAMPProxyError(403, "Request of registration rejected by "
"the user.")
def _web_profile_allowReverseCallbacks(self, private_key, allow):
self._update_last_activity_time()
if private_key in self._private_keys:
if allow == "0":
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
else:
self._web_profile_callbacks[private_key] = queue.Queue()
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _web_profile_pullCallbacks(self, private_key, timeout_secs):
self._update_last_activity_time()
if private_key in self._private_keys:
callback = []
callback_queue = self._web_profile_callbacks[private_key]
try:
while self._is_running:
item_queued = callback_queue.get_nowait()
callback.append(item_queued)
except queue.Empty:
pass
return callback
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
class WebProfileDialog:
"""
A base class to make writing Web Profile GUI consent dialogs
easier.
The concrete class must:
1) Poll ``handle_queue`` periodically, using the timer services
of the GUI's event loop. This function will call
``self.show_dialog`` when a request requires authorization.
``self.show_dialog`` will be given the arguments:
- ``samp_name``: The name of the application making the request.
- ``details``: A dictionary of details about the client
making the request.
- ``client``: A hostname, port pair containing the client
address.
- ``origin``: A string containing the origin of the
request.
2) Call ``consent`` or ``reject`` based on the user's response to
the dialog.
"""
def handle_queue(self):
try:
request = self.queue_request.get_nowait()
except queue.Empty: # queue is set but empty
pass
except AttributeError: # queue has not been set yet
pass
else:
if isinstance(request[0], str): # To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
self.show_dialog(samp_name, request[0], request[1], request[2])
def consent(self):
self.queue_result.put(True)
def reject(self):
self.queue_result.put(False)
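# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal tkinter
# consent dialog built on WebProfileDialog.  It assumes the hub attaches the
# ``queue_request`` and ``queue_result`` queues that ``handle_queue``,
# ``consent`` and ``reject`` rely on; the message text and the one-second
# polling interval are arbitrary choices made for this example.
class TkWebProfileDialog(WebProfileDialog):
    """Ask for Web Profile consent with a simple Yes/No message box."""

    def __init__(self, root):
        # ``root`` is an existing ``tkinter.Tk()`` instance whose event loop
        # drives the periodic polling required by WebProfileDialog.
        self.root = root
        self.root.after(1000, self._on_timer)

    def _on_timer(self):
        # Poll the request queue, then re-arm the timer.
        self.handle_queue()
        self.root.after(1000, self._on_timer)

    def show_dialog(self, samp_name, details, client, origin):
        from tkinter import messagebox
        text = ("Allow the web application '{}' (client {}:{}, origin {}) "
                "to connect to the SAMP hub?".format(samp_name, client[0],
                                                     client[1], origin))
        if messagebox.askyesno("SAMP Web Profile", text):
            self.consent()
        else:
            self.reject()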
|
multiprocessing_daemon.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Daemon vs. non-daemon processes.
"""
#end_pymotw_header
import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
if __name__ == '__main__':
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
time.sleep(1)
n.start()
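# Illustrative follow-up (not part of the original example): the parent exits
# as soon as the non-daemon child finishes, and the daemon process is then
# terminated before it can print its "Exiting" line.  To let the daemon run to
# completion, join the children before exiting, e.g.:
#
#     d.join(timeout=3)   # the 3-second timeout is an arbitrary choice
#     n.join()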
|
test_urllib.py
|
"""Regresssion tests for urllib"""
import urllib.parse
import urllib.request
import http.client
import email.message
import io
import unittest
from test import support
import os
import tempfile
import warnings
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = urllib.request.FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertTrue(isinstance(file_num, int),
"fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertTrue(isinstance(self.returned_obj.info(), email.message.Message))
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
# getproxies_environment() uses lowercased keys with the '_proxy' suffix
# removed; see the illustrative sketch after this class.
self.assertEqual('localhost', proxies['no'])
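# Illustrative sketch (not part of the original test suite): how
# getproxies_environment() maps *_proxy environment variables onto
# lowercased scheme keys.  The proxy URL below is a made-up placeholder.
def _demo_getproxies_environment():
    os.environ['http_proxy'] = 'http://proxy.example.com:3128'
    try:
        proxies = urllib.request.getproxies_environment()
        # The '_proxy' suffix is stripped and the key is lowercased.
        assert proxies['http'] == 'http://proxy.example.com:3128'
    finally:
        del os.environ['http_proxy']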
class urlopen_HttpTests(unittest.TestCase):
"""Test urlopen() opening a fake http connection."""
def fakehttp(self, fakedata):
class FakeSocket(io.BytesIO):
def sendall(self, str): pass
def makefile(self, *args, **kwds):
return self
def read(self, amt=None):
if self.closed: return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed: return b""
return io.BytesIO.readline(self, length)
class FakeHTTPConnection(http.client.HTTPConnection):
def connect(self):
self.sock = FakeSocket(fakedata)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = FakeHTTPConnection
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
def test_read(self):
self.fakehttp(b"Hello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(IOError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_userpass_inurl(self):
self.fakehttp(b"Hello!")
try:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
return "file://%s" % urllib.request.pathname2url(
os.path.abspath(filePath))
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertTrue(isinstance(result[1], email.message.Message),
"did not get a email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assertTrue(isinstance(count, int))
self.assertTrue(isinstance(block_size, int))
self.assertTrue(isinstance(total_size, int))
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read). Since the block size is 8192 bytes, only one block read is
# required to read the entire file.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
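# Illustrative sketch (not part of the original test suite): a reporthook for
# urllib.request.urlretrieve().  As the tests above show, the hook is called
# once when the "connection" is opened and then once per block read; it
# receives the block count so far, the block size in bytes, and the total
# size (-1 when no Content-Length is reported).  The URL in the example call
# is a made-up placeholder.
def _demo_progress_hook(block_count, block_size, total_size):
    received = block_count * block_size
    if total_size > 0:
        percent = min(100, received * 100 // total_size)
        print("downloaded %d%% (%d of %d bytes)" % (percent, received, total_size))
    else:
        print("downloaded %d bytes (total size unknown)" % received)

# Example call (placeholder URL):
# urllib.request.urlretrieve("http://www.example.com/data.bin", "data.bin",
#                            _demo_progress_hook)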
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_.-"
do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are quoted by default,
# except for space (which has a separate test).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with None (the default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when non-quoted characters are
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee, and
dictionary input is possible.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": {"a": 1, "b": 1}}, True))
def test_urlencode_encoding(self):
# ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
# ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test that automatic quoting and unquoting work for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
self.assertEqual(('user', 'ab'),urllib.parse.splitpasswd('user:ab'))
self.assertEqual(('user', 'a\nb'),urllib.parse.splitpasswd('user:a\nb'))
self.assertEqual(('user', 'a\tb'),urllib.parse.splitpasswd('user:a\tb'))
self.assertEqual(('user', 'a\rb'),urllib.parse.splitpasswd('user:a\rb'))
self.assertEqual(('user', 'a\fb'),urllib.parse.splitpasswd('user:a\fb'))
self.assertEqual(('user', 'a\vb'),urllib.parse.splitpasswd('user:a\vb'))
self.assertEqual(('user', 'a:b'),urllib.parse.splitpasswd('user:a:b'))
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work ok, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
ProxyTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests,
Utility_Tests,
URLopener_Tests,
#FTPWrapperTests,
)
if __name__ == '__main__':
test_main()
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import codecs
import unittest
import subprocess
import textwrap
import linecache
from contextlib import ExitStack
from io import StringIO
from test.support import os_helper
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function4(a, b, c, /):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... test_function4(1, 2, 3)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'return', # return out of function
... 'next', # step to test_function4()
... 'step', # stepping to test_function4()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) return
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
-> pass
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
-> test_function4(1, 2, 3)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
-> def test_function4(a, b, c, /):
(Pdb) args
a = 1
b = 2
c = 3
(Pdb) continue
BAZ
"""
def reset_Breakpoint():
import bdb
bdb.Breakpoint.clearBreakpoints()
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> reset_Breakpoint()
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'commands 10', # out of range
... 'commands a', # display help
... 'commands 4', # already deleted
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) commands 10
*** cannot set commands: Breakpoint number 10 out of range
(Pdb) commands a
*** Usage: commands [bnum]
...
end
(Pdb) commands 4
*** cannot set commands: Breakpoint 4 already deleted
(Pdb) continue
3
4
"""
def test_pdb_breakpoints_preserved_across_interactive_sessions():
"""Breakpoints are remembered between interactive sessions
>>> reset_Breakpoint()
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'import test.test_pdb',
... 'break test.test_pdb.do_something',
... 'break test.test_pdb.do_nothing',
... 'break',
... 'continue',
... ]):
... pdb.run('print()')
> <string>(1)<module>()...
(Pdb) import test.test_pdb
(Pdb) break test.test_pdb.do_something
Breakpoint 1 at ...test_pdb.py:...
(Pdb) break test.test_pdb.do_nothing
Breakpoint 2 at ...test_pdb.py:...
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep yes at ...test_pdb.py:...
2 breakpoint keep yes at ...test_pdb.py:...
(Pdb) continue
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'break',
... 'break pdb.find_function',
... 'break',
... 'clear 1',
... 'continue',
... ]):
... pdb.run('print()')
> <string>(1)<module>()...
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep yes at ...test_pdb.py:...
2 breakpoint keep yes at ...test_pdb.py:...
(Pdb) break pdb.find_function
Breakpoint 3 at ...pdb.py:97
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep yes at ...test_pdb.py:...
2 breakpoint keep yes at ...test_pdb.py:...
3 breakpoint keep yes at ...pdb.py:...
(Pdb) clear 1
Deleted breakpoint 1 at ...test_pdb.py:...
(Pdb) continue
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'break',
... 'clear 2',
... 'clear 3',
... 'continue',
... ]):
... pdb.run('print()')
> <string>(1)<module>()...
(Pdb) break
Num Type Disp Enb Where
2 breakpoint keep yes at ...test_pdb.py:...
3 breakpoint keep yes at ...pdb.py:...
(Pdb) clear 2
Deleted breakpoint 2 at ...test_pdb.py:...
(Pdb) clear 3
Deleted breakpoint 3 at ...pdb.py:...
(Pdb) continue
"""
def test_pdb_pp_repr_exc():
"""Test that do_p/do_pp do not swallow exceptions.
>>> class BadRepr:
... def __repr__(self):
... raise Exception('repr_exc')
>>> obj = BadRepr()
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'p obj',
... 'pp obj',
... 'continue',
... ]):
... test_function()
--Return--
> <doctest test.test_pdb.test_pdb_pp_repr_exc[2]>(2)test_function()->None
-> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
(Pdb) p obj
*** Exception: repr_exc
(Pdb) pp obj
*** Exception: repr_exc
(Pdb) continue
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
... 'source fooxxx', # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_pdb_whatis_command():
"""Test the whatis command
>>> myvar = (1,2)
>>> def myfunc():
... pass
>>> class MyClass:
... def mymethod(self):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'whatis myvar',
... 'whatis myfunc',
... 'whatis MyClass',
... 'whatis MyClass()',
... 'whatis MyClass.mymethod',
... 'whatis MyClass().mymethod',
... 'continue',
... ]):
... test_function()
--Return--
> <doctest test.test_pdb.test_pdb_whatis_command[3]>(2)test_function()->None
-> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
(Pdb) whatis myvar
<class 'tuple'>
(Pdb) whatis myfunc
Function myfunc
(Pdb) whatis MyClass
Class test.test_pdb.MyClass
(Pdb) whatis MyClass()
<class 'test.test_pdb.MyClass'>
(Pdb) whatis MyClass.mymethod
Function mymethod
(Pdb) whatis MyClass().mymethod
Method mymethod
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoint is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoint is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 1 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> reset_Breakpoint()
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
def test_pdb_issue_43318():
"""echo breakpoints cleared with filename:lineno
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
>>> reset_Breakpoint()
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'clear <doctest test.test_pdb.test_pdb_issue_43318[0]>:3',
... 'continue'
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_43318[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
(Pdb) clear <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
(Pdb) continue
1
2
3
4
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(os_helper.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(os_helper.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
os_helper.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(os_helper.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
with open(os_helper.TESTFN, 'wb') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], os_helper.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, os_helper.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function(b'', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode(),
'bœr',
('bœr', 4),
)
def test_find_function_found_with_encoding_cookie(self):
self._assert_find_function(
"""\
# coding: iso-8859-15
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode('iso-8859-15'),
'bœr',
('bœr', 5),
)
def test_find_function_found_with_bom(self):
self._assert_find_function(
codecs.BOM_UTF8 + """\
def bœr():
pass
""".encode(),
'bœr',
('bœr', 1),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(os_helper.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', os_helper.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(os_helper.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Fail to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(os_helper.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', os_helper.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={**os.environ, 'PYTHONIOENCODING': 'utf-8'}
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn(b'Error', stdout,
"Got an error running test script under PDB")
def test_issue36250(self):
with open(os_helper.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
evt = threading.Event()
def start_pdb():
evt.wait()
pdb.Pdb(readrc=False).set_trace()
t = threading.Thread(target=start_pdb)
t.start()
pdb.Pdb(readrc=False).set_trace()
evt.set()
t.join()""").encode('ascii'))
cmd = [sys.executable, '-u', os_helper.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\ncont\n')
self.assertNotIn(b'Error', stdout,
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Fail to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_issue26053(self):
# run command of pdb prompt echoes the correct args
script = "print('hello')"
commands = """
continue
run a b c
run d e f
quit
"""
stdout, stderr = self.run_pdb_script(script, commands)
res = '\n'.join([x.strip() for x in stdout.splitlines()])
self.assertRegex(res, "Restarting .* with arguments:\na b c")
self.assertRegex(res, "Restarting .* with arguments:\nd e f")
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with os_helper.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn(b"NameError: name 'invalid' is not defined",
stdout)
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_readrc_homedir(self):
save_home = os.environ.pop("HOME", None)
with os_helper.temp_dir() as temp_dir, patch("os.path.expanduser"):
rc_path = os.path.join(temp_dir, ".pdbrc")
os.path.expanduser.return_value = rc_path
try:
with open(rc_path, "w") as f:
f.write("invalid")
self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
finally:
if save_home is not None:
os.environ["HOME"] = save_home
def test_read_pdbrc_with_ascii_encoding(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb().set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with os_helper.temp_cwd():
with open('.pdbrc', 'w', encoding='utf-8') as f:
f.write("Fran\u00E7ais")
with open('main.py', 'w', encoding='utf-8') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
env = {'PYTHONIOENCODING': 'ascii'}
if sys.platform == 'win32':
env['PYTHONLEGACYWINDOWSSTDIO'] = 'non-empty-string'
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
env={**os.environ, **env}
)
with proc:
stdout, stderr = proc.communicate(b'c\n')
self.assertIn(b"UnicodeEncodeError: \'ascii\' codec can\'t encode character "
b"\'\\xe7\' in position 21: ordinal not in range(128)", stderr)
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
os_helper.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(os_helper.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_package_without_a_main(self):
pkg_name = 't_pkg'
module_name = 't_main'
os_helper.rmtree(pkg_name)
modpath = pkg_name + '/' + module_name
os.makedirs(modpath)
with open(modpath + '/__init__.py', 'w') as f:
pass
self.addCleanup(os_helper.rmtree, pkg_name)
stdout, stderr = self._run_pdb(['-m', modpath.replace('/', '.')], "")
self.assertIn(
"'t_pkg.t_main' is a package and cannot be directly executed",
stdout)
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
os_helper.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(os_helper.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
os_helper.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(os_helper.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('pass', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'-> pass',
'(Pdb) *** SyntaxError: \'(\' was never closed',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: \'(\' was never closed',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def test_issue34266(self):
'''do_run handles exceptions from parsing its arg'''
def check(bad_arg, msg):
commands = "\n".join([
f'run {bad_arg}',
'q',
])
stdout, _ = self.run_pdb_script('pass', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'-> pass',
f'(Pdb) *** Cannot run {bad_arg}: {msg}',
'(Pdb) ',
])
check('\\', 'No escaped character')
check('"', 'No closing quotation')
def test_issue42384(self):
'''When running `python foo.py` sys.path[0] is an absolute path. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with os_helper.temp_cwd() as cwd:
expected = f'(Pdb) sys.path[0] is {os.path.realpath(cwd)}'
stdout, stderr = self.run_pdb_script(script, commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
@os_helper.skip_unless_symlink
def test_issue42384_symlink(self):
'''When running `python foo.py` sys.path[0] resolves symlinks. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with os_helper.temp_cwd() as cwd:
cwd = os.path.realpath(cwd)
dir_one = os.path.join(cwd, 'dir_one')
dir_two = os.path.join(cwd, 'dir_two')
expected = f'(Pdb) sys.path[0] is {dir_one}'
os.mkdir(dir_one)
with open(os.path.join(dir_one, 'foo.py'), 'w') as f:
f.write(script)
os.mkdir(dir_two)
os.symlink(os.path.join(dir_one, 'foo.py'), os.path.join(dir_two, 'foo.py'))
stdout, stderr = self._run_pdb([os.path.join('dir_two', 'foo.py')], commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
def test_issue42383(self):
with os_helper.temp_cwd() as cwd:
with open('foo.py', 'w') as f:
s = textwrap.dedent("""
print('The correct file was executed')
import os
os.chdir("subdir")
""")
f.write(s)
subdir = os.path.join(cwd, 'subdir')
os.mkdir(subdir)
os.mkdir(os.path.join(subdir, 'subdir'))
wrong_file = os.path.join(subdir, 'foo.py')
with open(wrong_file, 'w') as f:
f.write('print("The wrong file was executed")')
stdout, stderr = self._run_pdb(['foo.py'], 'c\nc\nq')
expected = '(Pdb) The correct file was executed'
self.assertEqual(stdout.split('\n')[6].rstrip('\r'), expected)
class ChecklineTests(unittest.TestCase):
def setUp(self):
linecache.clearcache() # Pdb.checkline() uses linecache.getline()
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test_checkline_before_debugging(self):
with open(os_helper.TESTFN, "w") as f:
f.write("print(123)")
db = pdb.Pdb()
self.assertEqual(db.checkline(os_helper.TESTFN, 1), 1)
def test_checkline_after_reset(self):
with open(os_helper.TESTFN, "w") as f:
f.write("print(123)")
db = pdb.Pdb()
db.reset()
self.assertEqual(db.checkline(os_helper.TESTFN, 1), 1)
def test_checkline_is_not_executable(self):
with open(os_helper.TESTFN, "w") as f:
# Test for comments, docstrings and empty lines
s = textwrap.dedent("""
# Comment
\"\"\" docstring \"\"\"
''' docstring '''
""")
f.write(s)
db = pdb.Pdb()
num_lines = len(s.splitlines()) + 2 # Test for EOF
for lineno in range(num_lines):
self.assertFalse(db.checkline(os_helper.TESTFN, lineno))
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
unittest.makeSuite(ChecklineTests),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
|
Video_AudioThread_3D.py
|
import threading
import cv2
import pyaudio
import wave
import time
import numpy as np
import matplotlib.pyplot as plt
import math
# DEFINING AUDIO AND VIDEO RECORDING FUNCTIONS
def AudioRec(FORMAT,CHANNELS,RATE,CHUNK):
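    # Records RECORD_SECONDS of audio from the default input device using the
    # given PyAudio FORMAT/CHANNELS/RATE/CHUNK settings and writes the captured
    # frames to the WAV file named by WAVE_OUTPUT_FILENAME.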
WAVE_OUTPUT_FILENAME = "Audio1.wav"
RECORD_SECONDS = 5
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("Recording...")
frame = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frame.append(data)
print("Stopped Recording")
stream.stop_stream()
stream.close()
p.terminate()
waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(p.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
waveFile.writeframes(b''.join(frame))
waveFile.close()
return
def VideoRec():
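    # Captures frames from two cameras simultaneously, writing each stream to
    # its own AVI file and every frame to a numbered JPEG. After capture it
    # estimates the frame delay between the cameras by correlating column
    # profiles of the frames, then builds difference and blended frames and
    # merges the blended frames into Output.avi.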
cap = cv2.VideoCapture(1)
cap.set(3, 160)
cap.set(4, 120)
cap2 = cv2.VideoCapture(2)
cap2.set(3, 160)
cap2.set(4, 120)
w1 = int(cap.get(3))
h1 = int(cap.get(4))
    print(w1, h1)
# Checking if Camera has Opened Properly
if (cap.isOpened() == False):
print('Camera 1 Not Found')
if (cap2.isOpened() == False):
print("Camera 2 Not Found")
# function for manual correlation
def man_corr(a, b):
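        # Normalized cross-correlation of two 1-D sequences done by hand: the
        # shorter input is zero-padded to match, then for each lag l the
        # products a[n] * b[n - l] are summed and divided by
        # sqrt(sum(a**2) * sum(b**2)), giving one correlation score per lag.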
n1 = len(a)
n2 = len(b)
a_copy = np.zeros(n1)
b_copy = np.zeros(n2)
# Calculating square
sq_sum1 = 0
sq_sum2 = 0
for i in range(0, n1):
a_copy[i] = a[i] ** 2
sq_sum1 = sq_sum1 + a_copy[i]
# print sq_sum1,a_copy
for i in range(0, n2):
b_copy[i] = b[i] ** 2
sq_sum2 = sq_sum2 + b_copy[i]
# print sq_sum2,b_copy
sum = 0
r = np.zeros(1)
s = n1
diff = 1
if (n1 != n2):
if n1 > n2:
diff = n1 - n2 + 1
r = np.zeros(diff)
s = n1
for q in range(0, n1 - n2):
b.append(0)
else:
diff = n2 - n1 + 1
s = n2
r = np.zeros(diff)
for q in range(0, n2 - n1):
a.insert(0, 0)
# print a,b
for l in range(0, diff):
for n in range(0, s):
if n - l >= 0:
sum = sum + (a[n] * b[n - l])
r[l] = sum / math.sqrt(sq_sum1 * sq_sum2)
sum = 0
return r
# Function for splitting frames
def Split(Frame, cam_no, currentFrame):
name = 'Frame ' + str(cam_no) + '_' + str(currentFrame) + '.jpg'
print('Creating...' + name)
cv2.imwrite(name, Frame)
return
    # Function for computing the difference and blended frames between the two cameras
def Rec_Play(delay, frameC):
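        # Pairs frames from the two cameras according to the sign of the delay:
        # a positive delay means camera 2 lags, so frame i of camera 1 is matched
        # with frame i + delay of camera 2 (and the other way round for a
        # negative delay). Each pair produces a Difference_Frame and a
        # Blended_Frame JPEG, and the number of pairs written is returned.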
diff_count = 0
if (delay > 0):
for i in range(0, frameC - delay):
cam1 = cv2.imread('Frame 1_' + str(i) + '.jpg')
cam2 = cv2.imread('Frame 2_' + str(i + delay) + '.jpg')
diff = cv2.subtract(cam1, cam2)
print('Creating Difference frame ' + str(diff_count))
cv2.imwrite('Difference_Frame ' + str(diff_count) + '.jpg', diff)
blend = cv2.add(cam1, diff)
print('Creating Blended frame ' + str(diff_count))
cv2.imwrite('Blended_Frame ' + str(diff_count) + '.jpg', blend)
diff_count += 1
            print(diff_count)
else:
delay1 = abs(delay)
for i in range(0, frameC - delay1):
cam2 = cv2.imread('Frame 2_' + str(i) + '.jpg')
cam1 = cv2.imread('Frame 1_' + str(i + delay1) + '.jpg')
diff = cv2.subtract(cam2, cam1)
print('Creating Difference frame ' + str(diff_count))
cv2.imwrite('Difference_Frame ' + str(diff_count) + '.jpg', diff)
blend = cv2.add(cam2, diff)
print('Creating Blended frame ' + str(diff_count))
cv2.imwrite('Blended_Frame ' + str(diff_count) + '.jpg', blend)
diff_count += 1
            print(diff_count)
return diff_count
    # Function to write the blended frames into the merged output video (Output.avi)
def Merge(bframe):
out3 = cv2.VideoWriter('Output.avi', cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 25, (w1, h1))
currentFrame = 0
while (currentFrame <= bframe):
frame = cv2.imread('Blended_Frame ' + str(currentFrame) + '.jpg')
out3.write(frame)
currentFrame += 1
return
# Function to perform Correlation
def Correlation(image1, image2):
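        # Reads both frames, replaces every pixel with its channel average,
        # divides each image by its global mean as a rough brightness
        # normalization, collapses the columns into a single 1-D intensity
        # profile per image, and returns the man_corr score of the two profiles.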
img = cv2.imread(image1)
img2 = cv2.imread(image2)
ha, wa, bppa = np.shape(img)
hb, wb, bppb = np.shape(img2)
sum_matrix = 0
sum_matrix1 = 0
img_mean1 = img2
img_mean = img
for i in range(0, ha):
for j in range(0, wa):
m = img[i][j]
ps = m[0] + m[1] + m[2]
pavg = ps / 3
img_mean[i][j] = pavg
sum_matrix = sum_matrix + pavg
mean1 = sum_matrix / (ha * wa)
img_mean = img_mean / mean1
## normalization for image 2
for i in range(0, hb):
for j in range(0, wb):
m = img2[i][j]
ps = m[0] + m[1] + m[2]
pavg = ps / 3
img_mean1[i][j] = pavg
sum_matrix1 = sum_matrix1 + pavg
mean2 = sum_matrix1 / (hb * wb)
img_mean1 = img_mean1 / mean2
# print mean2
# print sum_matrix1
# print img_mean1
# Converting 2D image to 1-D vector
# adding column pixels
f_mat1 = np.zeros(wa) # The final 1D matrix of image 1
c_sum = 0
for p in range(0, wa):
for q in range(0, ha):
e = img_mean[q][p]
c_sum = c_sum + e[0]
f_mat1[p] = c_sum
c_sum = 0
# Converting 2D image2 to 1D vector
f_mat2 = np.zeros(wb)
c_sum = 0
for p in range(0, wb):
for q in range(0, hb):
e = img_mean1[q][p]
c_sum = c_sum + e[0]
f_mat2[p] = c_sum # THe final 1D matrix of image 2
c_sum = 0
correlation = man_corr(f_mat1, f_mat2)
return correlation
# print np.corrcoef(f_mat1,f_mat2)
# Creating VideoWriter Object for camera 1 and camera 2
out1 = cv2.VideoWriter('Cam1_out.avi', cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 25, (w1, h1))
out2 = cv2.VideoWriter('Cam2_out.avi', cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 25, (w1, h1))
currentFrame = 0
frame_count = 0
# Loop for capturing and displaying frame by frame
while (1):
ret, frame = cap.read()
if ret:
out1.write(frame)
Split(frame, 1, currentFrame)
cv2.imshow('Frame1', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyWindow('Frame1')
elif cv2.waitKey(1) == 27:
break
ret, frame2 = cap2.read()
if ret:
out2.write(frame2)
Split(frame2, 2, currentFrame)
cv2.imshow('Frame2', frame2)
if cv2.waitKey(1) & 0xFF == ord('e'):
cap2.release()
cv2.destroyWindow('Frame2')
elif cv2.waitKey(1) == 27:
break
currentFrame += 1
frame_count += 1
out1.release()
out2.release()
cap.release()
cap2.release()
cv2.destroyAllWindows()
print('Number of Frames = ' + str(frame_count))
x = np.zeros(frame_count)
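    # Correlate the middle frame of camera 1 against every frame of camera 2;
    # the index of the peak correlation, measured against that middle index,
    # gives the frame offset (delay) between the two cameras.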
for p in range(0, frame_count):
print('Iteration ' + str(p + 1))
        x[p] = Correlation('Frame 1_' + str(frame_count // 2) + '.jpg', 'Frame 2_' + str(p) + '.jpg')
    print('Correlation array:')
    print(x)
    max_corr = np.max(x)
    print('Maximum Correlation :' + str(max_corr))
    max_ind = list(x).index(max_corr)
    print('Index of Max: ' + str(max_ind))
    delay = max_ind + 1 - (frame_count // 2)
if (delay > 0):
print('Cam 2 lagging')
print('Frame Delay= ' + str(delay))
else:
print('Cam 1 lagging')
print('Frame Delay=' + str(delay))
Bframe_no = Rec_Play(delay, frame_count)
Merge(Bframe_no)
plt.plot(x)
plt.show()
return
if __name__ == "__main__":
t1=threading.Thread(target = AudioRec, args=(pyaudio.paInt16, 2, 44100, 1024, ))
t2=threading.Thread(target = VideoRec)
t1.start()
t2.start()
t1.join()
t2.join()
|
gui.py
|
import Tkinter
from Tkinter import *
import tkMessageBox
import subprocess
from subprocess import call
import sys
import csv
from collections import deque
from threading import Thread
#from threading import Timer
import test_rpca_ec
import os
from os.path import abspath
import Queue
import time
import PIL
from PIL import ImageTk, Image
#Error Check Initializations
errortime=[] #Buffer for storing time-stamp of the error
faultyspnd = [] #Buffer for storing the faulty spnd number
raw = [] #Buffer for storing raw value of the faulty spnd
faultEst = [] #Buffer for storing fault estimate
cnt = 0 #Count for errors that are consecutive in time
z = 0 #Flag for incrementing cnt
l = 0 #Used for ensuring Start button works only once during runtime
f1=0
f2=0
###############################################################################################################################################################
#Initialization
def initialization():
'''This is where the initial GUI's functions are present'''
#Creating a log file Data.csv only once for storing all parameter values from the user, if the file does not exist
if not os.path.exists('Data.csv'):
with open('Data.csv', 'w') as x:
w = csv.writer(x, delimiter = ',')
w.writerow(['threshVarCap', 'Window Width', 'Alpha', 'rDataMat', 'cDataMat', 'Time', 'error'])
w.writerow([0.01, 20, 0.05, 20000, 5, 1, 10])
        x.close()
#Creating a log file for storing all the outputs of the RPCA file in terms of Time, Faulty SPND, Raw Value and Fault Estimate
with open('AllLog.csv', 'w') as v:
w = csv.writer(v, delimiter = ',')
w.writerow(['Time', 'Faulty SPND', 'Raw Value', 'Fault Estimate'])
v.close()
def Save(j):
'''
Here the Save button function is defined
If all the fields are filled, 'if' condition is valid and all the values are saved in Data.csv file
'''
global f1
f1 += j
if f1 == 1:
if ((len(threshVarCap.get())!=0)&(len(winndowWidth.get())!=0)&(len(alpha.get())!=0)&(len(rDataMat.get())!=0)&(len(cDataMat.get())!=0)&(len(Time.get())!=0)&(len(error.get())!=0)):
with open('Data.csv', 'a') as x:
w = csv.writer(x, delimiter = ',')
w.writerow([threshVarCap.get(),winndowWidth.get(),alpha.get(),rDataMat.get(),cDataMat.get(),Time.get(),error.get()])
x.close()
initial.destroy()
else:
th=threshVarCap.get()
ww=winndowWidth.get()
al=alpha.get()
rD=rDataMat.get()
cD=cDataMat.get()
ti=Time.get()
er=error.get()
with open('Data.csv', 'r') as f:
try:
lastrow = deque(csv.reader(f), 1)[0]
except IndexError: # empty file
lastrow = None
if(len(threshVarCap.get())==0):
th=lastrow[0]
if(len(winndowWidth.get())==0):
ww=lastrow[1]
if(len(alpha.get())==0):
al=lastrow[2]
if(len(rDataMat.get())==0):
rD=lastrow[3]
if(len(cDataMat.get())==0):
cD=lastrow[4]
if(len(Time.get())==0):
ti=lastrow[5]
if(len(error.get())==0):
er=lastrow[6]
with open('Data.csv', 'a') as x:
w = csv.writer(x, delimiter = ',')
w.writerow([th,ww,al,rD,cD,ti,er])
x.close()
def yes():
confirm.destroy()
initial.destroy()
def no():
global f1
f1 = 0
initial.attributes("-topmost", True)
initial.attributes("-topmost", False)
confirm.destroy()
return f1
confirm=Tk()
confirm.attributes("-topmost", True)
confirm.title('Are you sure?')
#confirm.eval('tk::PlaceWindow %s center' % confirm.winfo_pathname(confirm.winfo_id()))
confirm.geometry('+325-500') #Adjust to bring the GUI to the center
Label(confirm, text=" Previous value at the empty field will be used. Continue?",font=("Times 19")).grid(row=0, sticky=W)
yesbutton = Button(confirm, text = ' Yes ', font=("Times 16"), fg='blue', command=yes).grid(row=1, column=0, sticky=W, padx=5, pady=5)
nobutton = Button(confirm, text = ' No ', font=("Times 16"),fg='red', command=no).grid(row=1, column=1, sticky=E, padx=5, pady=5)
confirm.mainloop()
def cancel(k):
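        # Cancel button handler: pops up a confirmation dialog and, if the user
        # agrees, closes the initialization window so the run continues with the
        # previously saved values in Data.csv; otherwise the flag is reset and
        # the initialization window is brought back on top.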
global f2
f2 += k
if f2 == 1:
def yes():
confirm2.destroy()
initial.destroy()
def no():
global f2
f2 = 0
initial.attributes("-topmost", True)
initial.attributes("-topmost", False)
confirm2.destroy()
return f2
confirm2=Tk()
confirm2.attributes("-topmost", True)
confirm2.title('Are you sure?')
#confirm.eval('tk::PlaceWindow %s center' % confirm.winfo_pathname(confirm.winfo_id()))
confirm2.geometry('+500-500') #Adjust to bring the GUI to the center
Label(confirm2, text=" Proceed with default values?",font=("Times 19")).grid(row=0, sticky=W)
yesbutton = Button(confirm2, text = ' Yes ', font=("Times 16"), fg='blue', command=yes).grid(row=1, column=0, sticky=W, padx=5, pady=5)
nobutton = Button(confirm2, text = ' No ', font=("Times 16"),fg='red', command=no).grid(row=1, column=1, sticky=E, padx=5, pady=5)
confirm2.mainloop()
initial = Tk()
#initial.eval('tk::PlaceWindow %s center' % initial.winfo_pathname(initial.winfo_id()))
initial.geometry('+350-500') #Adjust to bring the GUI to the center
initial.title('IITB NPCIL BARC BRNS SPND Fault Diagnosal') ############CHANGE HERE FOR NAME###############
with open('Data.csv', 'r') as f:
try:
lastrow = deque(csv.reader(f), 1)[0]
except IndexError: # empty file
lastrow = None
a = StringVar()
b = StringVar()
c = StringVar()
d = StringVar()
e = StringVar()
h = StringVar()
g = StringVar()
a.set(str(lastrow[0]))
b.set(str(lastrow[1]))
c.set(str(lastrow[2]))
d.set(str(lastrow[3]))
e.set(str(lastrow[4]))
h.set(str(lastrow[5]))
g.set(str(lastrow[6]))
#Edit Variable Names below (Left Column of Initialization GUI):
Label(initial, text="threshVarCap:",font=("MS_Serif 15 bold"), fg = '#000b6d').grid(row=1, sticky=W, padx=5, pady=5)
Label(initial, text="Window Length:",font=("MS_Serif 15 bold"), fg = '#000b6d').grid(row=2, sticky=W, padx=5, pady=5)
Label(initial, text="Alpha:",font=("MS_Serif 15 bold"), fg = '#000b6d').grid(row=3, sticky=W, padx=5, pady=5)
Label(initial, text="rDataMat:", font=("MS_Serif 15 bold"), fg = '#000b6d').grid(row=4, sticky=W, padx=5, pady=5)
Label(initial, text="cDataMat:",font=("MS_Serif 15 bold"), fg = '#000b6d').grid(row=5, sticky=W, padx=5, pady=5)
Label(initial, text="Error Freq (sec):",font=("MS_Serif 15 bold"), fg = '#000b6d').grid(row=6, sticky=W, padx=5, pady=5)
Label(initial, text="Error group size:",font=("MS_Serif 15 bold"), fg = '#000b6d').grid(row=7, sticky=W, padx=5, pady=5)
#Right Column of Initialization GUI:
Label(initial, text = 'Parameters',font=("Times 19 bold"), fg = 'blue').grid(row=0, sticky=W, padx=5, pady=5)
Label(initial, text = 'Enter the Values',font=("Times 19 bold"), fg = 'blue').grid(row=0, column=2, sticky=W, padx=5, pady=5)
Label(initial, text = 'Previous Values',font=("Times 19 bold"), fg = 'blue').grid(row=0, column=3, sticky=W, padx=5, pady=5)
Label(initial, text=lastrow[0],font=("Times 19 bold"), fg = 'gray').grid(row=1, sticky=E, column=3, padx=5, pady=5)
Label(initial, text=lastrow[1],font=("Times 19 bold"), fg = 'gray').grid(row=2, sticky=E, column=3, padx=5, pady=5)
Label(initial, text=lastrow[2],font=("Times 19 bold"), fg = 'gray').grid(row=3, sticky=E, column=3, padx=5, pady=5)
Label(initial, text=lastrow[3],font=("Times 19 bold"), fg = 'gray').grid(row=4, sticky=E, column=3, padx=5, pady=5)
Label(initial, text=lastrow[4],font=("Times 19 bold"), fg = 'gray').grid(row=5, sticky=E, column=3, padx=5, pady=5)
Label(initial, text=lastrow[5],font=("Times 19 bold"), fg = 'gray').grid(row=6, sticky=E, column=3, padx=5, pady=5)
Label(initial, text=lastrow[6],font=("Times 19 bold"), fg = 'gray').grid(row=7, sticky=E, column=3, padx=5, pady=5)
#Middle Column of Initialization GUI:
threshVarCap = Entry(initial, textvariable=a ,font=("Times 19 bold"))
threshVarCap.grid(row=1, column=2, padx=5, pady=5)
winndowWidth = Entry(initial, textvariable=b ,font=("Times 19 bold"))
winndowWidth.grid(row=2, column=2, padx=5, pady=5,)
alpha = Entry(initial, textvariable=c ,font=("Times 19 bold"))
alpha.grid(row=3, column=2, padx=5, pady=5)
rDataMat = Entry(initial, textvariable=d ,font=("Times 19 bold"))
rDataMat.grid(row=4, column=2, padx=5, pady=5)
cDataMat = Entry(initial, textvariable=e , font=("Times 19 bold"))
cDataMat.grid(row=5, column=2, padx=5, pady=5)
Time = Entry(initial, textvariable=h ,font=("Times 19 bold"))
Time.grid(row=6, column=2, padx=5, pady=5)
error = Entry(initial, textvariable=g ,font=("Times 19 bold"))
error.grid(row=7, column=2, padx=5, pady=5)
#Buttons in the Initialization GUI:
savebutton = Button(initial, text = ' Save ',font=("Times 17 bold"), fg='green', command=lambda: Save(1)).grid(row=8, column=0, sticky=W, padx=10, pady=10)
cancelbutton = Button(initial, text = 'Cancel',font=("Times 17 bold"), fg='red', command=lambda: cancel(1)).grid(row=8, column=3, sticky=E, padx=10, pady=10)
#Time-out for initial GUI
'''def timeout():
initial.destroy()
t = Timer(2, timeout)
t.start()'''
def doSomething():
os._exit(1)
initial.protocol('WM_DELETE_WINDOW', doSomething)
initial.mainloop()
#Initialization End
###############################################################################################################################################################
#Monitor
def main():
'''Here the GUI with Start and Exit Button lies'''
f_path = abspath("gui.png")
with open('Data.csv', 'r') as f:
try:
lastrow = deque(csv.reader(f), 1)[0]
except IndexError: # empty file
lastrow = None
def Start(m):
'''Start button is defined here'''
#Algorithm to Start the program only once then renders 'Start' button useless
global l
l = l + m
if (l == 1):
Thread(target = myfunction1).start() #Starting thread 1: RPCA
time.sleep(1) #Waiting time to start the error-detection after starting RPCA.
Thread(target = myfunction2).start() #Starting thread 2: Error detection
def myfunction1(): #Thread 1: For RPCA
test_rpca_ec.main()
def myfunction2(): #Thread 2: For Error-detection and display via GUI
errorcheck()
def errorcheck():
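        # Reads the AllLog.csv rows produced by the RPCA thread, buffers every
        # row whose Faulty SPND field is non-zero and counts consecutive faulty
        # rows. When the count reaches the configured error group size it waits
        # for the configured error interval, prints the buffered group in the
        # monitor text box and raises a Yes/No pop-up whose answer is appended
        # to MyLog.csv.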
global cnt, z
#Initializing the LOG created for storing the error response from user.
#(Only the Titles of the CSV files are made here once and the file is overwritten each time the code is run)
with open('MyLog.csv', 'w+') as g:
w = csv.writer(g, delimiter=',')
w.writerow(['Time','Faulty SPND','Raw Value','Fault Estimate','Status'])
            g.close()
#Open the LOG file created by RPCA and read all columns to detect error
with open('AllLog.csv', 'rU') as f:
r = csv.DictReader(f, delimiter=',')
for row in r:
Fault_at_time = eval(row['Time'])
Faulty_SPND = eval(row['Faulty SPND'])
Raw_value_of_SPND = eval(row['Raw Value'])
Fault_Estimate = eval(row['Fault Estimate'])
if(Faulty_SPND!=0):
z = 1
errortime.append(Fault_at_time)
faultyspnd.append(Faulty_SPND)
raw.append(Raw_value_of_SPND)
faultEst.append(Fault_Estimate)
else:
z = 0
if (z==1):
cnt += 1
else:
cnt = 0
if(cnt == int(lastrow[6])):
cnt = 0
z = 0
time.sleep(float(lastrow[5])) #Waiting time before showing the groups of the error detected
#Printing the errors on the GUI monitor
text=('\n')
edit.insert(1.0, text)
for i in range(1, (int(lastrow[6])+1)):
text=('\n Faults at time: ' + str(errortime[-int(lastrow[6])+i-1]) + ' Faulty SPND: ' + str(faultyspnd[-int(lastrow[6])+i-1]) + ' Raw Value: ' + str(format(raw[-int(lastrow[6])+i-1], '+.14f')) + ' Fault Estimate:' + str(format(faultEst[-int(lastrow[6])+i-1], '+.14f')))
edit.insert(1.0, text)
#Error detection pop-up Yes and No Button Command Starts:
def ans(flag):
if(flag == 1): #For Yes
with open('MyLog.csv', 'a') as g:
w = csv.writer(g, delimiter=',')
w.writerow([Fault_at_time,Faulty_SPND,Raw_value_of_SPND,Fault_Estimate,'Y'])
myapp.destroy()
else: #For No
with open('MyLog.csv', 'a') as g:
w = csv.writer(g, delimiter=',')
w.writerow([Fault_at_time,Faulty_SPND,Raw_value_of_SPND,Fault_Estimate,'N'])
myapp.destroy()
#Error detection pop-up Yes and No Button Commands Ends
def doThis():
myapp.withdraw()
time.sleep(0.1)
myapp.geometry('+450-265')
myapp.deiconify()
pass
myapp = Tk()
myapp.attributes("-topmost", True) #To keep the pop-up above all open windows
myapp.protocol('WM_DELETE_WINDOW', doThis)
myapp.title('Fault Check')
myapp.geometry('+450-370') #Adjust to bring the error GUI to the center
Label(myapp, text=" Errornous Value(s) detected. Confirm?",font=("Times 19")).grid(row=0, sticky=W)
yesbutton = Button(myapp, text = ' Yes ',font=("Times 16"), fg='red', command=lambda: ans(1)).grid(row=1, column=0, sticky=W, padx=7, pady=7) #Error detection GUI 'Yes' button
nobutton = Button(myapp, text = ' No ', font=("Times 16"),fg='blue', command=lambda: ans(0)).grid(row=1, column=1, sticky=E, padx=7, pady=7) #Error detection GUI 'No' button
myapp.mainloop()
#GUI with Start and Exit Button starts:
label = Tk()
label.title('IITB NPCIL BARC BRNS SPND Fault Diagnosal')#Change the Title of the GUI here
label.geometry('+250-400') #Adjust to bring the GUI to the center
#edit=Text(label, height=int(lastrow[6])+2, width=120) #Height of the text-box changes according to the error group size.
edit=Text(label, height=12, width=120)
edit.pack(side = BOTTOM, padx=2, pady=2)
def Exit(): #Exit button function
label.destroy()
os._exit(1)
#Banner image in the GUI:
label.img = ImageTk.PhotoImage(Image.open(f_path))
label.panel1 = Label(label, image = label.img)
label.panel1.pack(side = "top", expand = "YES", padx=10, pady=10)
#Link below the banner:
w = Label(label,text="http://www.ee.iitb.ac.in/%7Ebelur/spnd/", font=("Times 20 bold italic"))
w.pack(side = "top", padx=5, pady=2)
#Exit and Start Button declaration:
exitbutton=Button(label, text = ' Exit ', fg='red',font=("Times 15"), command = Exit).pack(side=LEFT, padx=10, pady=2)
startbutton=Button(label, text = ' Start ',font=("Times 15"), fg='green', command =lambda: Start(1)).pack(side=RIGHT, padx=10, pady=2)
#Used to kill all threads if main GUI is closed in any way
def doSomething():
os._exit(1)
label.protocol('WM_DELETE_WINDOW', doSomething)
#Infinite loop necessary to keep GUI alive
label.mainloop()
#GUI with Start and Exit Button ends
if __name__ == '__main__':
initialization()
main()
|
test_smtplib.py
|
import asyncore
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import unittest
from test import support, mock_socket
try:
    import threading
except ImportError:
    threading = None
HOST = support.HOST
if sys.platform == 'darwin':
    # select.poll returns a select.POLLHUP at the end of the tests
    # on darwin, so just ignore it
    def handle_expt(self):
        pass
    smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
    serv.listen()
    evt.set()
    try:
        conn, addr = serv.accept()
    except socket.timeout:
        pass
    else:
        n = 500
        while buf and n > 0:
            r, w, e = select.select([], [conn], [])
            if w:
                sent = conn.send(buf)
                buf = buf[sent:]
                n -= 1
        conn.close()
    finally:
        serv.close()
        evt.set()
class GeneralTests(unittest.TestCase):
    def setUp(self):
        smtplib.socket = mock_socket
        self.port = 25
    def tearDown(self):
        smtplib.socket = socket
    # This method is no longer used but is retained for backward compatibility,
    # so test to make sure it still works.
    def testQuoteData(self):
        teststr = "abc\n.jkl\rfoo\r\n..blue"
        expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
        self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
        # connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
        # check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
    def testTimeoutDefault(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(mock_socket.getdefaulttimeout())
        mock_socket.setdefaulttimeout(30)
        self.assertEqual(mock_socket.getdefaulttimeout(), 30)
        try:
            smtp = smtplib.SMTP(HOST, self.port)
        finally:
            mock_socket.setdefaulttimeout(None)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()
    def testTimeoutNone(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            smtp = smtplib.SMTP(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(smtp.sock.gettimeout())
        smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(1)
        with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(2)
        with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
    try:
        if hasattr(select, 'poll'):
            poll_fun = asyncore.poll2
        else:
            poll_fun = asyncore.poll
        n = 1000
        while asyncore.socket_map and n > 0:
            poll_fun(0.01, asyncore.socket_map)
            # when the client conversation is finished, it will
            # set client_evt, and it's then ok to kill the server
            if client_evt.is_set():
                serv.close()
                break
            n -= 1
    except socket.timeout:
        pass
    finally:
        if not client_evt.is_set():
            # allow some time for the client to read the result
            time.sleep(0.5)
        serv.close()
        asyncore.close_all()
        serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
    maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
port = support.find_unused_port()
try:
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3, source_address=('127.0.0.1', port))
self.assertEqual(smtp.source_address, ('127.0.0.1', port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testELHO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
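# (Descriptive comment added for clarity, assuming CPython's smtplib where
# _MAXLINE is 8192: a reply of _MAXLINE * 2 dots is comfortably longer than
# one allowed response line, so reading it must raise SMTPResponseException.)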
def setUp(self):
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
threading.Thread(target=server, args=servargs).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
'login': 'TXIuQUBzb21ld2hlcmUuY29t',
'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
}
sim_auth_login_user = 'TXIUQUBZB21LD2HLCMUUY29T'
sim_auth_plain = 'AE1YLKFAC29TZXDOZXJLLMNVBQBZB21LCGFZC3DVCMQ='
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
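# (Illustrative sketch added here, not from the original suite: the encoded
# credentials above are assumed to be what smtplib itself would send, shown
# uppercased because the simulated error message uppercases the unknown
# "command". For AUTH PLAIN the payload is "\0<user>\0<password>", e.g.
#     import base64
#     user, password = sim_auth
#     plain = base64.b64encode(('\0%s\0%s' % (user, password)).encode('ascii'))
#     login_user = base64.b64encode(user.encode('ascii'))
# and plain.decode().upper() / login_user.decode().upper() should line up with
# sim_auth_plain and sim_auth_login_user.)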
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_AUTH(self, arg):
mech = arg.strip().lower()
if mech=='cram-md5':
self.push('334 {}'.format(sim_cram_md5_challenge))
elif mech not in sim_auth_credentials:
self.push('504 auth type unimplemented')
return
elif mech=='plain':
self.push('334 ')
elif mech=='login':
self.push('334 ')
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
# SimSMTPChannel doesn't fully support AUTH because it requires a
# synchronous read to obtain the credentials...so instead smtpd
# sees the credential sent by smtplib's login method as an unknown command,
# which results in smtplib raising an auth error. Fortunately the error
# message contains the encoded credential, so we can partially check that it
# was generated correctly (partially, because the 'word' is uppercased in
# the error message).
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1], initial_response_ok=False)
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_plain, str(err))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_user, str(err))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials['cram-md5'], str(err))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_user, str(err))
smtp.close()
def test_auth_function(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
self.serv.add_feature("AUTH CRAM-MD5")
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
supported = {'CRAM-MD5': smtp.auth_cram_md5,
'PLAIN': smtp.auth_plain,
'LOGIN': smtp.auth_login,
}
for mechanism, method in supported.items():
try: smtp.auth(mechanism, method, initial_response_ok=False)
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials[mechanism.lower()].upper(),
str(err))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rest_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received /r/n to /n, so we can't easily test that
# we are successfully sending /r/n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(smtplib.SMTPNotSupportedError,
smtp.send_message, msg)
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
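# (Clarifying note added here: this is the RFC 4616 AUTH PLAIN payload
# "\0<user>\0<password>" for the fake credentials; its base64 form should be
# 'AHBzdQBkb2Vzbm90ZXhpc3Q=', which the channel below compares against the
# initial response smtplib sends.)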
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
# PLAIN does those, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(
BadHELOServerTests,
DebuggingServerTests,
GeneralTests,
NonConnectingTests,
SMTPAUTHInitialResponseSimTests,
SMTPSimTests,
TooLongLineTests,
)
if __name__ == '__main__':
test_main()
|
test_application.py
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint: disable-msg=C0301
#pylint: disable-msg=F0401
#pylint: disable-msg=W0142
"""Tests for application.py"""
import sys
import os
import unittest
import time
#import pprint
#import pdb
import warnings
from threading import Thread
import ctypes
import mock
import six
sys.path.append(".")
from pywinauto import Desktop
from pywinauto.windows import application, win32defines
from pywinauto.controls import hwndwrapper
from pywinauto.windows.application import Application
from pywinauto.base_application import WindowSpecification # noqa: E402
from pywinauto.windows.application import process_module
from pywinauto.windows.application import process_get_modules
from pywinauto.windows.application import ProcessNotFoundError
from pywinauto.windows.application import AppStartError
from pywinauto.windows.application import AppNotConnected
from pywinauto.controls.common_controls import TrackbarWrapper
from pywinauto import findwindows
from pywinauto import findbestmatch
from pywinauto.timings import Timings
from pywinauto.timings import TimeoutError
from pywinauto.timings import WaitUntil
from pywinauto.timings import always_wait_until
from pywinauto.timings import always_wait_until_passes
from pywinauto.timings import timestamp # noqa: E402
from pywinauto.sysinfo import is_x64_Python
from pywinauto.sysinfo import is_x64_OS
from pywinauto.sysinfo import UIA_support
#application.set_timing(1, .01, 1, .01, .05, 0, 0, .1, 0, .01)
# About dialog may take some time to load
# so make sure that we wait for it.
Timings.window_find_timeout = 5
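# Descriptive note added for clarity: the helper below picks the notepad.exe
# that matches the interpreter's bitness, since a 32-bit Python on 64-bit
# Windows is subject to WOW64 file-system redirection and effectively runs
# the SysWOW64 copy.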
def _notepad_exe():
if is_x64_Python() or not is_x64_OS():
return r"C:\Windows\System32\notepad.exe"
else:
return r"C:\Windows\SysWOW64\notepad.exe"
mfc_samples_folder_32 = mfc_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\MFC_samples")
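# Note added for clarity: 64-bit Python is assumed to need the x64 builds of
# the MFC sample apps, while 32-bit Python keeps the default 32-bit folder.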
if is_x64_Python():
mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
class ApplicationWarningTestCases(unittest.TestCase):
"""Unit tests for warnings in the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
# Force Display User and Deprecation warnings every time
# Python 3.3 + nose/unittest tries really hard to suppress them
for warning in (UserWarning, PendingDeprecationWarning):
warnings.simplefilter('always', warning)
if is_x64_Python():
self.sample_exe = os.path.join(mfc_samples_folder,
"CmnCtrl1.exe")
self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder_32,
"CmnCtrl1.exe")
else:
self.sample_exe = os.path.join(mfc_samples_folder_32, "CmnCtrl1.exe")
self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder,
"x64",
"CmnCtrl1.exe")
def testStartWarning3264(self):
if not is_x64_OS():
self.defaultTestResult()
return
warnings.filterwarnings('always', category=UserWarning, append=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
app = Application().start(self.sample_exe_inverted_bitness)
app.kill()
assert len(w) >= 1
assert issubclass(w[-1].category, UserWarning)
assert "64-bit" in str(w[-1].message)
def testConnectWarning3264(self):
if not is_x64_OS():
self.defaultTestResult()
return
app = Application().start(self.sample_exe_inverted_bitness)
# Appveyor mysteries...
self.assertEqual(app.is_process_running(), True)
with mock.patch("warnings.warn") as mockWarn:
Application().connect(pid=app.process)
app.kill()
args, kw = mockWarn.call_args
assert len(args) == 2
assert "64-bit" in args[0]
assert args[1].__name__ == 'UserWarning'
class ApplicationWin32KillTestCases(unittest.TestCase):
"""Unit tests for method Application.kill() with backend='win32'"""
backend = 'win32'
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.sample_exe = os.path.join(mfc_samples_folder, 'RowList.exe')
self.app = Application(backend=self.backend).start(self.sample_exe)
self.target_process = self.app.process
def tearDown(self):
self.app.kill(soft=False)
def test_kill_hard(self):
self.assertTrue(self.app.kill(soft=False))
self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
def test_kill_soft(self):
self.assertTrue(self.app.kill(soft=True))
self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
def test_already_killed_hard(self):
self.assertTrue(self.app.kill(soft=False))
self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
self.assertTrue(self.app.kill(soft=False)) # already killed, returned True anyway
def test_already_killed_soft(self):
self.assertTrue(self.app.kill(soft=False))
self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
self.assertTrue(self.app.kill(soft=True)) # already killed, returned True anyway
def test_kill_soft_with_modal_subdialog(self):
"""Kill the app with modal subdialog to cover win.force_close() call"""
self.app.RowListSampleApplication.menu_select('Help->About RowList...')
if self.backend == 'win32':
self.app.window(name='About RowList').wait('visible')
elif self.backend == 'uia':
self.app.RowListSampleApplication.by(name='About RowList').wait('visible')
else:
raise NotImplementedError('test_kill_soft_with_modal_subdialog: ' \
'backend "{}" is not supported'.format(self.backend))
self.assertTrue(self.app.kill(soft=True))
self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
self.assertTrue(self.app.kill(soft=True)) # already killed, returned True anyway
if UIA_support:
class ApplicationUiaKillTestCases(ApplicationWin32KillTestCases):
"""Unit tests for method Application.kill() with backend='uia'"""
backend = 'uia'
# the same test methods run here
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
class AdminTestCases(ApplicationWarningTestCases):
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
super(AdminTestCases, self).setUp()
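# (Comment added for clarity: Start-Process -Verb RunAs launches the sample
# elevated, so the non-admin test process connects to an admin-owned window
# in the tests below; wait_for_idle=False presumably because the elevated
# process can't be queried from this unelevated caller.)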
cmd = 'powershell -Command "Start-Process {} -Verb RunAs"'.format(self.sample_exe)
self.app = Application().start(cmd, wait_for_idle=False)
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
super(AdminTestCases, self).tearDown()
def test_non_admin_warning(self):
warnings.filterwarnings('always', category=UserWarning, append=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.app = Application().connect(name="Common Controls Sample", timeout=20)
assert len(w) >= 1
assert issubclass(w[-1].category, UserWarning)
assert "process has no rights" in str(w[-1].message)
def test_non_admin_click(self):
self.app = Application().connect(name="Common Controls Sample", timeout=20)
with self.assertRaises(RuntimeError):
self.app.CommonControlsSample.OK.click()
with self.assertRaises(RuntimeError):
self.app.CommonControlsSample.OK.click_input()
with self.assertRaises(RuntimeError):
self.app.CommonControlsSample.TVS_HASBUTTON.check()
class NonAdminTestCases(ApplicationWarningTestCases):
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
super(NonAdminTestCases, self).setUp()
self.app = Application().start(self.sample_exe)
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
super(NonAdminTestCases, self).tearDown()
def test_both_non_admin(self):
warnings.filterwarnings('always', category=UserWarning, append=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.app = Application().connect(name="Common Controls Sample", timeout=5)
assert len(w) == 0
def test_both_non_admin_click(self):
self.app = Application().connect(name="Common Controls Sample", timeout=5)
self.app.CommonControlsSample.TVS_HASBUTTON.check()
self.assertEqual(self.app.CommonControlsSample.TVS_HASBUTTON.is_checked(), True)
self.app.CommonControlsSample.OK.click()
self.app.CommonControlsSample.wait_not('visible')
class ApplicationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.prev_warn = warnings.showwarning
def no_warnings(*args, **kwargs): pass
warnings.showwarning = no_warnings
if is_x64_Python() or not is_x64_OS():
self.notepad_subpath = r"system32\notepad.exe"
else:
self.notepad_subpath = r"SysWOW64\notepad.exe"
def tearDown(self):
"""Close the application after tests"""
#self.dlg.SendMessage(win32defines.WM_CLOSE)
warnings.showwarning = self.prev_warn
def test__init__(self):
"""Verify that Application instance is initialized or not"""
self.assertRaises(ValueError, Application, backend='unregistered')
def test__iter__(self):
"""Verify that Application instance is not iterable"""
app = Application()
app.start(_notepad_exe())
with self.assertRaises(NotImplementedError):
for a in app:
pass
app.kill()
def test_not_connected(self):
"""Verify that it raises when the app is not connected"""
self.assertRaises (AppNotConnected, Application().__getattribute__, 'Hiya')
self.assertRaises (AppNotConnected, Application().__getitem__, 'Hiya')
self.assertRaises (AppNotConnected, Application().window_, name='Hiya')
self.assertRaises (AppNotConnected, Application().top_window_,)
def test_start_problem(self):
"""Verify start_ raises on unknown command"""
self.assertRaises (AppStartError, Application().start, 'Hiya')
def test_start(self):
"""test start() works correctly"""
app = Application()
self.assertEqual(app.process, None)
app.start(_notepad_exe())
self.assertNotEqual(app.process, None)
self.assertEqual(app.UntitledNotepad.process_id(), app.process)
notepadpath = os.path.join(os.environ['systemroot'], self.notepad_subpath)
self.assertEqual(str(process_module(app.process)).lower(), str(notepadpath).lower())
app.UntitledNotepad.menu_select("File->Exit")
def testStart_bug01(self):
"""On SourceForge forum AppStartError forgot to include %s for application name"""
app = Application()
self.assertEqual(app.process, None)
application.app_start_timeout = 1
app_name = r"I am not * and Application!/\.exe"
try:
app.start(app_name)
except AppStartError as e:
self.assertEqual(app_name in str(e), True)
# def testset_timing(self):
# """Test that set_timing sets the timing correctly"""
# prev_timing = (
# application.window_find_timeout,
# application.window_retry_interval,
# application.app_start_timeout,
# application.exists_timeout,
# application.exists_retry_interval,
# hwndwrapper.delay_after_click,
# hwndwrapper.delay_after_menuselect,
# hwndwrapper.delay_after_sendkeys_key,
# hwndwrapper.delay_after_button_click,
# hwndwrapper.delay_before_after_close_click,
# )
# set_timing(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
#
# self.assertEqual(
# (
# application.window_find_timeout,
# application.window_retry_interval,
# application.app_start_timeout,
# application.exists_timeout,
# application.exists_retry_interval,
# hwndwrapper.delay_after_click,
# hwndwrapper.delay_after_menuselect,
# hwndwrapper.delay_after_sendkeys_key,
# hwndwrapper.delay_after_button_click,
# hwndwrapper.delay_before_after_close_click,
# ), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) )
#
# set_timing(*prev_timing)
def test_connect_path(self):
"""Test that connect_() works with a path"""
app1 = Application()
app1.start(_notepad_exe())
app_conn = Application()
app_conn.connect(path=self.notepad_subpath)
self.assertEqual(app1.process, app_conn.process)
app_conn = Application()
if is_x64_Python() or not is_x64_OS():
app_conn.connect(path=r"c:\windows\system32\notepad.exe")
else:
app_conn.connect(path=r"c:\windows\syswow64\notepad.exe")
self.assertEqual(app1.process, app_conn.process)
accessible_modules = process_get_modules()
accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
self.assertEqual('notepad.exe' in accessible_process_names, True)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_path_timeout(self):
"""Test that connect_() works with a path with timeout"""
app1 = Application()
def delayed_launch():
time.sleep(2)
app1.start(_notepad_exe())
thread = Thread(target=delayed_launch)
thread.start()
app_conn = Application()
app_conn.connect(path=_notepad_exe(), timeout=3)
self.assertEqual(app1.process, app_conn.process)
accessible_modules = process_get_modules()
accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
self.assertEqual('notepad.exe' in accessible_process_names, True)
app1.UntitledNotepad.menu_select('File->Exit')
def test_connect_path_timeout_problem(self):
"""Test that connect_() raise error when no process start"""
app1 = Application()
def delayed_launch():
time.sleep(1)
app1.start(_notepad_exe())
thread = Thread(target=delayed_launch)
thread.start()
self.assertRaises(ProcessNotFoundError, Application().connect, path=_notepad_exe(), timeout=0.5)
time.sleep(0.7)
app1.UntitledNotepad.menu_select('File->Exit')
def test_connect_process_timeout_failed(self):
"""Test that connect_(pid=...) raise error when set timeout"""
app1 = Application()
app1.start(_notepad_exe())
self.assertRaises(ProcessNotFoundError, Application().connect, pid=0, timeout=0.5)
app1.UntitledNotepad.menu_select('File->Exit')
# def test_Connect(self):
# """Test that connect_() works with a path"""
# app1 = Application()
# app1.start_("notepad.exe")
#
# app_conn = Application()
# app_conn.connect_(path = r"system32\notepad.exe")
# self.assertEqual(app1.process, app_conn.process)
#
# app_conn = Application()
# app_conn.connect_(path = r"c:\windows\system32\notepad.exe")
# self.assertEqual(app1.process, app_conn.process)
#
# app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_process(self):
"""Test that connect_() works with a process"""
app1 = Application()
app1.start(_notepad_exe())
app_conn = Application()
app_conn.connect(pid=app1.process)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_handle(self):
"""Test that connect_() works with a handle"""
app1 = Application()
app1.start(_notepad_exe())
handle = app1.UntitledNotepad.handle
app_conn = Application()
app_conn.connect(handle=handle)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_windowspec(self):
"""Test that connect_() works with a windowspec"""
app1 = Application()
app1.start(_notepad_exe())
#unused var: handle = app1.UntitledNotepad.handle
app_conn = Application()
try:
app_conn.connect(name="Untitled - Notepad")
except findwindows.WindowAmbiguousError:
wins = findwindows.find_elements(active_only=True, name="Untitled - Notepad")
app_conn.connect(handle = wins[0].handle)
except findwindows.ElementNotFoundError:
WaitUntil(30, 0.5, lambda: len(findwindows.find_elements(active_only=True, name="Untitled - Notepad")) > 0)
wins = findwindows.find_elements(active_only=True, name="Untitled - Notepad")
app_conn.connect(handle = wins[0].handle)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_raises(self):
"""Test that connect_() raises with invalid input"""
# try an argument that does not exist
self.assertRaises (
KeyError,
Application().connect, **{'not_arg': 23})
self.assertRaises (
RuntimeError,
Application().connect)
# try to pass an invalid process
self.assertRaises (
ProcessNotFoundError,
Application().connect, **{'pid': 0})
# try to pass an invalid handle
self.assertRaises(
RuntimeError,
Application().connect, **{'handle' : 0})
# try to pass an invalid path
self.assertRaises(
ProcessNotFoundError,
Application().connect, **{'path': "no app here", 'timeout': 0.0})
def test_top_window(self):
"""Test that top_window_() works correctly"""
Timings.window_find_timeout = 5
app = Application()
self.assertRaises(AppNotConnected, app.top_window_)
app.start(_notepad_exe())
self.assertEqual(app.UntitledNotepad.handle, app.top_window_().handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
self.assertEqual(app.AboutNotepad.handle, app.top_window_().handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
self.assertRaises(RuntimeError, app.top_window_)
def test_active_window(self):
"""Test that active_() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.active_)
self.assertRaises(AppNotConnected, app.is64bit)
app.start(_notepad_exe())
app.UntitledNotepad.wait('ready')
self.assertEqual(app.active_().handle, app.UntitledNotepad.handle)
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
self.assertRaises(RuntimeError, app.active_)
def test_cpu_usage(self):
"""Verify that cpu_usage() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.cpu_usage)
app.start(_notepad_exe())
self.assertEqual(0.0 <= app.cpu_usage() <= 100.0, True)
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
def test_wait_cpu_usage_lower(self):
"""Test that wait_cpu_usage_lower() works correctly"""
if is_x64_Python() != is_x64_OS():
return None
Application().Start(r'explorer.exe')
def _cabinetwclass_exist():
"Verify if at least one active 'CabinetWClass' window is created"
l = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')
return (len(l) > 0)
WaitUntil(40, 0.5, _cabinetwclass_exist)
handle = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')[-1].handle
window = WindowSpecification({'handle': handle, 'backend': 'win32', })
explorer = Application().Connect(pid = window.process_id())
try:
explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
window.AddressBandRoot.ClickInput()
window.TypeKeys(r'Control Panel\Programs\Programs and Features', with_spaces=True, set_foreground=True)
window.TypeKeys(r'{ENTER}', set_foreground = False)
WaitUntil(40, 0.5, lambda: len(findwindows.find_elements(active_only=True,
name='Programs and Features',
class_name='CabinetWClass')) > 0)
explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
installed_programs = window.FolderView.texts()[1:]
programs_list = ','.join(installed_programs)
if ('Microsoft' not in programs_list) and ('Python' not in programs_list):
hwndwrapper.ImageGrab.grab().save(r'explorer_screenshot.jpg')
hwndwrapper.ActionLogger().log('\ninstalled_programs:\n')
for prog in installed_programs:
hwndwrapper.ActionLogger().log(prog)
self.assertEqual(('Microsoft' in programs_list) or ('Python' in programs_list), True)
finally:
window.Close(2.0)
if UIA_support:
def test_wait_cpu_usage_lower_uia(self):
"""Test that wait_cpu_usage_lower() works correctly for UIA"""
app = Application(backend='uia')
app.start('notepad.exe')
try:
app.wait_cpu_usage_lower(threshold = 1.5, timeout = 30, usage_interval = 2)
finally:
app.kill()
app.cpu_usage = mock.Mock(return_value=10)
self.assertRaises(
RuntimeError, app.wait_cpu_usage_lower,
threshold = 9.0, timeout = 5, usage_interval = 0.5
)
# def test_wait_for_idle_exception(self):
# """Test that method start() raises an exception when wait for idle failed"""
# app = Application()
# self.assertRaises(Exception, app.start, 'cmd.exe')
# # TODO: test and fix the case when cmd.exe can't be killed by app.kill()
def test_windows(self):
"""Test that windows_() works correctly"""
Timings.window_find_timeout = 5
app = Application()
self.assertRaises(AppNotConnected, app.windows_, **{'title' : 'not connected'})
app.start('notepad.exe')
self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})
notepad_handle = app.UntitledNotepad.handle
self.assertEqual(app.windows(visible=True), [notepad_handle])
app.UntitledNotepad.menu_select("Help->About Notepad")
aboutnotepad_handle = app.AboutNotepad.handle
self.assertEqual(
app.windows(visible=True, enabled=None),
[aboutnotepad_handle, notepad_handle])
app.AboutNotepad.OK.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_window(self):
"""Test that window_() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.window_, **{'title' : 'not connected'})
app.start(_notepad_exe())
self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})
title = app.window(name="Untitled - Notepad")
title_re = app.window(name_re="Untitled[ -]+Notepad")
classname = app.window(class_name="Notepad")
classname_re = app.window(class_name_re="Not..ad")
handle = app.window(handle=title.handle)
bestmatch = app.window(best_match="Untiotled Notepad")
self.assertNotEqual(title.handle, None)
self.assertNotEqual(title.handle, 0)
self.assertEqual(title.handle, title_re.handle)
self.assertEqual(title.handle, classname.handle)
self.assertEqual(title.handle, classname_re.handle)
self.assertEqual(title.handle, handle.handle)
self.assertEqual(title.handle, bestmatch.handle)
app.UntitledNotepad.menu_select("File->Exit")
def test_getitem(self):
"""Test that __getitem__() works correctly"""
Timings.window_find_timeout = 5
app = Application()
app.start(_notepad_exe())
self.assertRaises(Exception, app['blahblah'])
self.assertRaises(
findbestmatch.MatchError,
app['blahblah']['not here'].__getitem__, 'handle')
self.assertEqual(
app[u'Unt\xeftledNotepad'].handle,
app.window(name="Untitled - Notepad").handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
self.assertEqual(
app['AboutNotepad'].handle,
app.window(name="About Notepad").handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_getattribute(self):
"""Test that __getattribute__() works correctly"""
Timings.window_find_timeout = 5
app = Application()
app.start(_notepad_exe())
self.assertRaises(
findbestmatch.MatchError,
app.blahblah.__getattribute__, 'handle')
self.assertEqual(
app.UntitledNotepad.handle,
app.window(name="Untitled - Notepad").handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
# I think it's OK that this no longer raises a matcherror
# just because the window is not enabled - doesn't mean you
# should not be able to access it at all!
#self.assertRaises(findbestmatch.MatchError,
# app.Notepad.__getattribute__, 'handle')
self.assertEqual(
app.AboutNotepad.handle,
app.window(name="About Notepad").handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_kill(self):
"""test killing the application"""
app = Application()
app.start(_notepad_exe())
app.UntitledNotepad.Edit.type_keys("hello")
app.UntitledNotepad.menu_select("File->Print...")
#app.Print.FindPrinter.Click() # Vasily: (Win7 x64) "Find Printer" dialog is from splwow64.exe process
#app.FindPrinters.Stop.Click()
app.kill()
self.assertRaises(AttributeError, app.UntitledNotepad.Edit)
def test_process_is_running(self):
"""Tests process is running and wait for exit function"""
app = Application()
app.start(_notepad_exe())
app.UntitledNotepad.wait("ready")
self.assertTrue(app.is_process_running())
self.assertRaises(TimeoutError, lambda: app.wait_for_process_exit(timeout=5, retry_interval=1))
app.kill()
app.wait_for_process_exit()
self.assertFalse(app.is_process_running())
def test_should_return_not_running_if_not_started(self):
"""Tests that works on new instance
is_process_running/wait_for_process_exit can be called on not started/disconnected instance
"""
app = Application()
app.wait_for_process_exit(timeout=10, retry_interval=1)
self.assertFalse(app.is_process_running())
class TestInheritedApp(Application):
"""Our inherited version of class"""
def test_method(self):
"""This method should be called without any issues"""
return self is not None
def test_application_inheritance(self):
"""Test that Application class can be inherited and has it's own methods"""
app = ApplicationTestCases.TestInheritedApp()
self.assertTrue(app.test_method())
def test_non_magic_application(self):
app = Application()
self.assertEqual(app.allow_magic_lookup, True)
app_no_magic = Application(allow_magic_lookup=False)
self.assertEqual(app_no_magic.allow_magic_lookup, False)
app_no_magic.start(_notepad_exe())
window = app_no_magic.window(best_match="UntitledNotepad")
dlg = window.by(best_match="Edit")
dlg.draw_outline()
with self.assertRaises(AttributeError):
app_no_magic.UntitledNotepad
with self.assertRaises(AttributeError):
window.Edit
app_no_magic.kill()
app_no_magic.wait_for_process_exit()
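# Illustrative sketch (not part of the original test suite): the non-magic
# lookup style exercised in test_non_magic_application() above, written the way
# it might appear in application code. 'notepad.exe' is only an example target.
def _example_non_magic_lookup():
    app = Application(allow_magic_lookup=False).start("notepad.exe")
    window = app.window(best_match="UntitledNotepad")
    edit = window.by(best_match="Edit")
    edit.draw_outline()  # attribute-style lookups like window.Edit would raise AttributeError here
    app.kill()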
class WindowSpecificationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend="win32").start(_notepad_exe())
self.dlgspec = self.app.UntitledNotepad
self.ctrlspec = self.app.UntitledNotepad.Edit
def tearDown(self):
"""Close the application after tests"""
# close the application
#self.app.UntitledNotepad.menu_select("File->Exit")
self.app.kill()
def test__init__(self):
"""Test creating a new spec by hand"""
wspec = WindowSpecification(
dict(
best_match=u"UntitledNotepad",
app=self.app)
)
self.assertEqual(
wspec.window_text(),
u"Untitled - Notepad")
self.assertEqual(self.dlgspec.app, self.app)
self.assertEqual(self.ctrlspec.app, self.app)
self.assertEqual(wspec.app, self.app)
def test__init__both_keywords(self):
"""Test creating a new spec with ambiguity by process and app simultaneously"""
self.assertRaises(KeyError, WindowSpecification,
dict(best_match=u"UntitledNotepad", app=self.app, pid=self.app.process)
)
def test__call__(self):
"""Test that __call__() correctly raises an error"""
self.assertRaises(AttributeError, self.dlgspec)
self.assertRaises(AttributeError, self.ctrlspec)
# no best_match!
wspec = WindowSpecification(
dict(name=u"blah", app=self.app)
)
self.assertRaises(AttributeError, wspec)
def test_wrapper_object(self):
"""Test that we can get a control"""
self.assertEqual(True, isinstance(self.dlgspec, WindowSpecification))
self.assertEqual(
True,
isinstance(self.dlgspec.find(), hwndwrapper.HwndWrapper)
)
def test_window(self):
"""test specifying a sub window of an existing specification"""
sub_spec = self.dlgspec.by(class_name ="Edit")
sub_spec_legacy = self.dlgspec.window(class_name = "Edit")
self.assertEqual(True, isinstance(sub_spec, WindowSpecification))
self.assertEqual(sub_spec.class_name(), "Edit")
self.assertEqual(sub_spec_legacy.class_name(), "Edit")
def test__getitem__(self):
"""test item access of a windowspec"""
self.assertEqual(
True,
isinstance(self.dlgspec['Edit'], WindowSpecification)
)
self.assertEqual(self.dlgspec['Edit'].class_name(), "Edit")
self.assertRaises(AttributeError, self.ctrlspec.__getitem__, 'edit')
def test_getattr(self):
"""Test getting attributes works correctly"""
self.assertEqual(
True,
isinstance(self.dlgspec.Edit, WindowSpecification)
)
self.assertEqual(self.dlgspec.Edit.class_name(), "Edit")
# check that getting a dialog attribute works correctly
self.assertEqual(
"Notepad",
self.dlgspec.class_name())
# Check handling 'parent' as a WindowSpecification
spec = self.ctrlspec.by(parent=self.dlgspec, visible=True)
self.assertEqual(spec.class_name(), "Edit")
def test_non_magic_getattr(self):
ws = WindowSpecification(dict(best_match="Notepad"))
self.assertEqual(ws.allow_magic_lookup, True)
ws_no_magic = WindowSpecification(dict(best_match="Notepad"), allow_magic_lookup=False)
self.assertEqual(ws_no_magic.allow_magic_lookup, False)
dlg = ws_no_magic.by(best_match="Edit")
has_focus = dlg.has_keyboard_focus()
self.assertIn(has_focus, (True, False))
with self.assertRaises(AttributeError):
ws_no_magic.Edit
def test_exists(self):
"""Check that windows exist"""
self.assertEqual(True, self.dlgspec.exists())
self.assertEqual(True, self.dlgspec.exists(0))
self.assertEqual(True, self.ctrlspec.exists())
# TODO: test a control that is not visible but exists
#self.assertEqual(True, self.app.DefaultIME.exists())
start = timestamp()
self.assertEqual(False, self.app.BlahBlah.exists(timeout=.1))
self.assertEqual(True, timestamp() - start < .3)
start = timestamp()
self.assertEqual(False, self.app.BlahBlah.exists(timeout=3))
self.assertEqual(True, 2.7 < timestamp() - start < 3.3)
def test_exists_timing(self):
"""test the timing of the exists method"""
# try ones that should be found immediately
start = timestamp()
self.assertEqual(True, self.dlgspec.exists())
self.assertEqual(True, timestamp() - start < .3)
start = timestamp()
self.assertEqual(True, self.ctrlspec.exists())
self.assertEqual(True, timestamp() - start < .3)
# try one with an explicit timeout (should still be found quickly)
start = timestamp()
self.assertEqual(True, self.dlgspec.exists(.5))
timedif = timestamp() - start
self.assertEqual(True, 0 <= timedif < .6)
def test_wait(self):
"""test the functionality and timing of the wait method"""
allowable_error = .2
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("enaBleD "))
time_taken = (timestamp() - start)
if not 0 <= time_taken < (0 + 2 * allowable_error):
self.assertEqual(.02, time_taken)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" ready"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" exiSTS"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" VISIBLE "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" ready enabled"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("visible exists "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("exists "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("actIve "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
self.assertRaises(SyntaxError, self.dlgspec.wait, "Invalid_criteria")
def test_wait_non_existing(self):
"""test timing of the wait method for non-existing element"""
allowable_error = .2
start = timestamp()
self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'exists')
expected = Timings.window_find_timeout
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
def test_wait_invisible(self):
"""test timing of the wait method for non-existing element and existing invisible one"""
# TODO: re-use an MFC sample for this test
allowable_error = .2
start = timestamp()
self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'visible')
expected = Timings.window_find_timeout
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
# make sure Status Bar is not visible
status_bar_menu = self.app.UntitledNotepad.menu().item('&View').sub_menu().item('&Status Bar')
if status_bar_menu.is_checked():
status_bar_menu.select()
# check that existing invisible control is still found with 'exists' criterion
status_bar_spec = self.app.UntitledNotepad.by(class_name="msctls_statusbar32", visible=None)
self.assertEqual('StatusBar', status_bar_spec.wait('exists').friendly_class_name())
start = timestamp()
self.assertRaises(TimeoutError, status_bar_spec.wait, 'exists visible')
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, status_bar_spec.wait, 'visible exists')
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
def test_wait_not(self):
"""
Test that wait not fails for all the following
* raises and error when criteria not met
* timing is close to the timeout value
"""
allowable_error = .16
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "enaBleD ", .1, .05)
taken = timestamp() - start
if not .1 <= taken < .1 + allowable_error:
self.assertEqual(.12, taken)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " ready", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " exiSTS", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " VISIBLE ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " ready enabled", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "visible exists ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "exists ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "actIve ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
self.assertRaises(SyntaxError, self.dlgspec.wait_not, "Invalid_criteria")
# def test_wait_ready(self):
# """Make sure the friendly class is set correctly"""
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitReady(.1, .05))
#
# # if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
#
# def testWaitNotReady(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotReady, .1, .05)
#
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
#
#
# def testWaitEnabled(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitEnabled(.1, .05))
#
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
#
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
#
# def testWaitNotEnabled(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotEnabled, .1, .05)
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
#
# def testWaitVisible(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitVisible(.1, .05))
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
# def testWaitNotVisible(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotVisible, .1, .05)
# # if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# def testWaitExists(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitExists(.1, .05))
#
# # if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# def testWaitNotExists(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotExists, .1, .05)
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
def test_depth(self):
"""Test that descendants() with depth works correctly"""
self.dlgspec.menu_select("Format -> Font")
self.assertNotEqual(
len(self.app['Font'].descendants(depth=1)),
len(self.app['Font'].descendants(depth=2)))
def test_dump_tree(self):
"""Make sure dump_tree() doesn't crash"""
self.dlgspec.dump_tree()
self.ctrlspec.dump_tree()
def test_dump_tree_file_output(self):
"""Make sure dump_tree() creates correct file"""
output_filename = "test_dump_tree.txt"
self.dlgspec.dump_tree(filename=output_filename)
if os.path.isfile(output_filename):
with open(output_filename, "r") as test_log_file:
content = str(test_log_file.readlines())
self.assertTrue("'Untitled - NotepadEdit'" in content
and "'Edit'" in content)
self.assertTrue("child_window(class_name=\"msctls_statusbar32\"" in content)
os.remove(output_filename)
else:
self.fail("dump_tree can't create a file")
self.ctrlspec.dump_tree(filename=output_filename)
if os.path.isfile(output_filename):
with open(output_filename, "r") as test_log_file:
content = str(test_log_file.readlines())
self.assertTrue("child_window(class_name=\"Edit\")" in content)
os.remove(output_filename)
else:
self.fail("dump_tree can't create a file")
def test_find_elements_re(self):
"""Test for bug #90: A crash in 'find_elements' when called with 'title_re' argument"""
self.dlgspec.wait('visible')
windows = findwindows.find_elements(name_re="Untitled - Notepad")
self.assertTrue(len(windows) >= 1)
class ChildWindowSpecificationFromWrapperTests(unittest.TestCase):
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend="win32").start(_notepad_exe())
self.ctrlspec = self.app.window(found_index=0).find().by(class_name='Edit')
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_wrapper_object(self):
"""Test that we can get a control"""
self.assertEqual(True, isinstance(self.ctrlspec, WindowSpecification))
self.assertEqual(
True,
isinstance(self.ctrlspec.find(), hwndwrapper.HwndWrapper)
)
def test_parent(self):
"""Test recreating specification from parent dialog wrapper"""
dlg = self.ctrlspec.parent()
sub_spec = dlg.by(class_name ="Edit")
self.assertEqual(True, isinstance(sub_spec, WindowSpecification))
self.assertEqual(sub_spec.class_name(), "Edit")
self.assertEqual(self.ctrlspec.handle, sub_spec.handle)
def test_dump_tree_file_output(self):
"""Make sure dump_tree() creates correct file"""
output_filename = "test_dump_tree.txt"
self.ctrlspec.dump_tree(filename=output_filename)
if os.path.isfile(output_filename):
with open(output_filename, "r") as test_log_file:
content = str(test_log_file.readlines())
self.assertTrue("child_window(class_name=\"Edit\")" in content)
os.remove(output_filename)
else:
self.fail("dump_tree can't create a file")
def test_properties(self):
"""Check control properties"""
self.assertEqual(self.ctrlspec.class_name(), "Edit")
self.assertTrue(self.ctrlspec.exists())
if UIA_support:
class UIAWindowSpecificationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class with UIA backend"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend="uia").start(_notepad_exe())
self.dlgspec = self.app.UntitledNotepad
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_child_window_depth(self):
"""Test that child_window() with depth works correctly"""
# TODO fix same elements at different tree levels on win32 backend
self.dlgspec.menu_select("Format -> Font")
font = self.dlgspec.by(name="Font")
with self.assertRaises(findbestmatch.MatchError):
font.by(best_match="ListBox0", depth=1).find()
font.by(best_match="ListBox0", depth=2).find()
class WaitUntilDecoratorTests(unittest.TestCase):
"""Unit tests for always_wait_until and always_wait_until_passes decorators"""
def test_always_wait_until_decorator_success(self):
"""Test always_wait_until_decorator success"""
@always_wait_until(4, 2)
def foo():
return True
self.assertTrue(foo())
def test_always_wait_until_decorator_failure(self):
"""Test wait_until_decorator failure"""
@always_wait_until(4, 2)
def foo():
return False
self.assertRaises(TimeoutError, foo)
def test_always_wait_until_passes_decorator_success(self):
"""Test always_wait_until_passes_decorator success"""
@always_wait_until_passes(4, 2)
def foo():
return True
self.assertTrue(foo())
def test_always_wait_until_passes_decorator_failure(self):
"""Test always_wait_until_passes_decorator failure"""
@always_wait_until_passes(4, 2)
def foo():
raise Exception("Unexpected Error in foo")
self.assertRaises(TimeoutError, foo)
class MultiLevelWindowSpecificationTests(unittest.TestCase):
"""Unit tests for multi-level (3+) WindowSpecification objects"""
if UIA_support:
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.slow()
self.app = Application(backend='uia').start(os.path.join(mfc_samples_folder, u"RowList.exe"))
self.dlg = self.app.RowListSampleApplication
def tearDown(self):
"""Close the application after tests"""
self.dlg.CloseButton.click()
self.dlg.wait_not('visible')
def test_3level_specification(self):
"""Test that controls can be accessed by 3 levels of attributes"""
self.dlg.Toolbar.About.click()
self.dlg.AboutRowList.OK.click()
#self.dlg.AboutRowList.wait_not('visible') # XXX: it takes more than 50 seconds!
else: # Win32
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
self.dlg = self.app.CommonControlsSample
def tearDown(self):
"""Close the application after tests"""
self.dlg.SendMessage(win32defines.WM_CLOSE)
def test_4level_specification(self):
"""Test that controls can be accessed by 4 levels of attributes"""
self.assertEqual(self.dlg.CPagerCtrl.Pager.Toolbar.button_count(), 12)
if UIA_support:
class DesktopUiaWindowSpecificationTests(unittest.TestCase):
"""Unit tests for Desktop(backend='uia') object"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.slow()
self.app = Application().start('explorer.exe "' + mfc_samples_folder_32 + '"')
self.desktop = Desktop(backend='uia')
self.desktop_no_magic = Desktop(backend='uia', allow_magic_lookup=False)
def tearDown(self):
"""Close the application after tests"""
self.desktop.MFC_samplesDialog.close()
self.desktop.MFC_samplesDialog.wait_not('exists')
def test_folder_list(self):
"""Test that ListViewWrapper returns correct files list in explorer.exe"""
files_list = self.desktop.MFC_samplesDialog.Shell_Folder_View.Items_View.find()
self.assertEqual([item.window_text() for item in files_list.get_items()],
[u'x64', u'BCDialogMenu.exe', u'CmnCtrl1.exe', u'CmnCtrl2.exe', u'CmnCtrl3.exe',
u'CtrlTest.exe', u'mfc100u.dll', u'NewControls.exe', u'RebarTest.exe', u'RowList.exe', u'TrayMenu.exe'])
self.assertEqual(files_list.item('RebarTest.exe').window_text(), 'RebarTest.exe')
def test_set_backend_to_window_uia(self):
"""Set backend to method window(), except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.window(backend='uia', name='MFC_samplesDialog')
with self.assertRaises(ValueError):
self.desktop.window(backend='win32', name='MFC_samplesDialog')
def test_get_list_of_windows_uia(self):
"""Test that method .windows() returns a non-empty list of windows"""
dlgs = self.desktop.windows()
self.assertTrue(len(dlgs) > 1)
def test_set_backend_to_windows_uia(self):
"""Set backend to method .windows(), except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.windows(backend='win32')
with self.assertRaises(ValueError):
self.desktop.windows(backend='uia')
def test_only_visible_windows_uia(self):
"""Set visible=True to method .windows()"""
dlgs = self.desktop.windows(visible=True)
self.assertTrue(all([win.is_visible() for win in dlgs]))
def test_only_enable_windows_uia(self):
"""Set enable_only to the method windows"""
dlgs = self.desktop.windows(enabled=True)
self.assertTrue(all([win.is_enabled() for win in dlgs]))
def test_non_magic_desktop(self):
from pywinauto.controls.uiawrapper import UIAWrapper
self.assertEqual(self.desktop.allow_magic_lookup, True)
self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
dlgs = self.desktop_no_magic.windows()
self.assertTrue(len(dlgs) > 1)
window = self.desktop_no_magic.window(name="MFC_samples")
self.assertEqual(window.allow_magic_lookup, False)
dlg = window.by(class_name="ShellTabWindowClass").find()
self.assertIsInstance(dlg, UIAWrapper)
has_focus = dlg.has_keyboard_focus()
self.assertIn(has_focus, (True, False))
with self.assertRaises(AttributeError):
self.desktop_no_magic.MFC_samples
with self.assertRaises(AttributeError):
window.ShellTabWindowClass
class DesktopWin32WindowSpecificationTests(unittest.TestCase):
"""Unit tests for Desktop(backend='win32') object"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
self.desktop = Desktop(backend='win32')
self.desktop_no_magic = Desktop(backend='win32', allow_magic_lookup=False)
self.window_title = 'Common Controls Sample'
def tearDown(self):
"""Close the application after tests"""
self.desktop.window(name=self.window_title, pid=self.app.process).SendMessage(win32defines.WM_CLOSE)
def test_simple_access_through_desktop(self):
"""Test that controls can be accessed by 4 levels of attributes"""
dlg = self.desktop.window(name=self.window_title, pid=self.app.process)
self.assertEqual(dlg.Pager.Toolbar.button_count(), 12)
def test_set_backend_to_window_win32(self):
"""Set backend to method window(), except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.window(backend='uia', name=self.window_title, pid=self.app.process)
with self.assertRaises(ValueError):
self.desktop.window(backend='win32', name=self.window_title, pid=self.app.process)
def test_get_list_of_windows_win32(self):
"""Test that method .windows() returns a non-empty list of windows"""
dlgs = self.desktop.windows()
self.assertTrue(len(dlgs) > 1)
window_titles = [win_obj.window_text() for win_obj in dlgs]
self.assertTrue(self.window_title in window_titles)
def test_set_backend_to_windows_win32(self):
"""Set backend to method windows, except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.windows(backend='win32')
with self.assertRaises(ValueError):
self.desktop.windows(backend='uia')
def test_only_visible_windows_win32(self):
"""Set visible=True to method .windows()"""
dlgs = self.desktop.windows(visible=True)
self.assertTrue(all([win.is_visible() for win in dlgs]))
def test_only_enable_windows_win32(self):
"""Set enable_only to the method windows"""
dlgs = self.desktop.windows(enabled=True)
self.assertTrue(all([win.is_enabled() for win in dlgs]))
def test_from_point_win32(self):
"""Test method Desktop(backend='win32').from_point(x, y)"""
combo = self.app.Common_Controls_Sample.ComboBox.find()
x, y = combo.rectangle().mid_point()
combo_from_point = self.desktop.from_point(x, y)
self.assertEqual(combo, combo_from_point)
def test_top_from_point_win32(self):
"""Test method Desktop(backend='win32').top_from_point(x, y)"""
combo = self.app.Common_Controls_Sample.ComboBox.find()
dlg = self.app.Common_Controls_Sample.find()
x, y = combo.rectangle().mid_point()
dlg_from_point = self.desktop.top_from_point(x, y)
self.assertEqual(dlg, dlg_from_point)
def test_non_magic_desktop(self):
self.assertEqual(self.desktop.allow_magic_lookup, True)
self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
window = self.desktop_no_magic.window(name=self.window_title, pid=self.app.process)
self.assertEqual(window.allow_magic_lookup, False)
dlg = window.by(class_name="msctls_trackbar32").find()
self.assertIsInstance(dlg, TrackbarWrapper)
pos = dlg.get_position()
self.assertIsInstance(pos, six.integer_types)
with self.assertRaises(AttributeError):
getattr(self.desktop_no_magic, self.window_title.replace(" ", "_"))
with self.assertRaises(AttributeError):
window.msctls_trackbar32
if __name__ == "__main__":
unittest.main()
|
TestProxy.py
|
import os,time
from PIL import Image
import numpy as np
import tensorflow as tf
from IutyLib.mutithread.threads import LoopThread
from IutyLib.commonutil.config import Config
from multiprocessing import Process,Manager
from prx.PathProxy import PathProxy
from prx.ClassProxy import ClassProxy
from prx.CoachProxy import CoachProxy
#from prx.CoachProxy import ins_CoachProxy
from object.Config import CNNDivParam,CNNDivSetting
import object.datasets as datasets
from object.models import getModel
def killTensorBoard():
os.system(r'taskkill /f /t /im "tensorboard.exe" ')
pass
class TestProxy:
"""
api here
"""
def testPicture(project,tag,path,isfile):
isfile = False
rtn = {'success':False}
succ = False
data = {}
details = {}
manager = Manager()
return_dict = manager.dict()
process = Process(target=TestProxy.predictMulti,args=(project,tag,path,isfile,return_dict))
process.start()
process.join()
process.kill()
"""
succ,data,details = TestProxy.predictModel(project,tag,path,isfile)
if not succ:
rtn['error'] = data
return rtn
rtn['data'] = data
rtn['details'] = details
"""
rtn["data"] = return_dict["data"]
rtn["details"] = return_dict["details"]
rtn['success'] = True
return rtn
"""
methods here
"""
def getStatistics(test_result):
result = {}
for item in test_result:
if not item["result"] in result:
result[item["result"]] = 0
result[item["result"]] += 1
return result
def getGroup(datatype,path,file):
if datatype == "cnn-div":
group_x,data_x = datasets.BmpGroup.load_pictures(path,file)
else:
raise Exception("unknown data type")
return group_x,data_x
def predictMulti(projectname,tag,path,file,rtn):
succ,data,details = TestProxy.predictModel(projectname,tag,path,file,rtn)
pass
def predictModel(projectname,tag,path,file,out_rtn = {}):
killTensorBoard()
param = CNNDivParam(projectname,tag)
datasetname = param.Type()
modelname = param.Model()
classes = param.Classes()
model = getModel(modelname)
"""
model.compile(optimizer=adam,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
"""
tagpath = PathProxy.getModelTagDir(projectname,tag)
checkpoint_path = os.path.join(tagpath,"checkpoints","save.ckpt")
result = []
if os.path.exists(checkpoint_path + '.index'):
print('-------------load the model-----------------')
model.load_weights(checkpoint_path)
else:
raise Exception("No model called")
group_x,data_x = TestProxy.getGroup(datasetname,path,file)
"""
for g_x in group_x:
print(g_x)
r = model.predict(np.array[g_x,])
result.append(r)
"""
result = model.predict(data_x,use_multiprocessing=True)
result = tf.argmax(result, axis=1)
result = result.numpy()
details={}
rtn = {}
for i in range(len(group_x)):
rtn[group_x[i]] = classes[str(result[i])]
if not classes[str(result[i])] in details:
details[classes[str(result[i])]] = 0
details[classes[str(result[i])]]+=1
out_rtn['success'] = True
out_rtn['data'] = rtn
out_rtn['details'] = details
return True, rtn,details
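# Illustrative sketch (not part of the original module): how testPicture() is
# expected to be called and the shape of its return value. The project name,
# tag, and path below are hypothetical placeholders.
def _example_test_picture_usage():
    rtn = TestProxy.testPicture("MyProject", "20210222_131420",
                                r"D:\FastCNN\Projects\MyProject\test", False)
    # rtn == {'success': True,
    #         'data': {<picture name>: <class label>, ...},
    #         'details': {<class label>: <count>, ...}}
    return rtn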
if __name__ == "__main__":
TestProxy.predictModel("Lightning","20210222_131420",r"D:\FastCNN\Projects\Lightning\test\暗点")
|
base.py
|
import json
import copy
import threading
import array
import struct
import time
import csv
import os
import random
from sources.utils.sml_runner import SMLRunner
try:
from sources.buffers import CircularBufferQueue, CircularResultsBufferQueue
except:
from buffers import CircularBufferQueue, CircularResultsBufferQueue
class BaseReader(object):
""" Base Reader Object, describes the methods that must be implemented for each data source"""
def __init__(self, config, device_id=None, name=None, **kwargs):
self.samples_per_packet = config["CONFIG_SAMPLES_PER_PACKET"]
self.class_map = config["CLASS_MAP"]
self.model_json = config["MODEL_JSON"]
self.loop = config["LOOP"]
self.source_samples_per_packet = None
self.data_type = config.get("DATA_TYPE", "int16")
self.sml_library_path = config.get("SML_LIBRARY_PATH", None)
self.run_sml_model = config.get("RUN_SML_MODEL", False)
self.convert_to_int16 = config.get("CONVERT_TO_INT16", False)
self.scaling_factor = config.get("SCALING_FACTOR", 1)
self.sml = None
self.sample_rate = None
self.config_columns = None
self.device_id = device_id
self.recording = False
self.streaming = False
self._thread = None
self._record_thread = None
self.buffer = None
self.rbuffer = None
self._lock = threading.Lock()
@property
def data_width(self):
if self.config_columns is None:
return 0
return len(self.config_columns)
@property
def data_byte_size(self):
INT16_BYTE_SIZE = 2
FLOAT32_BYTE_SIZE = 4
if self.data_type == "int16":
return INT16_BYTE_SIZE
elif self.data_type == "float":
return FLOAT32_BYTE_SIZE
return INT16_BYTE_SIZE
@property
def data_type_str(self):
if self.data_type == "int16":
return "h"
elif self.data_type == "float":
return "f"
return "h"
@property
def data_type_cast(self):
if self.data_type == "int16":
return int
elif self.data_type == "float":
return float
return int
@property
def data_width_bytes(self):
return self.data_width * self.data_byte_size
@property
def packet_buffer_size(self):
return self.samples_per_packet * self.source_buffer_size
@property
def source_buffer_size(self):
if self.source_samples_per_packet is None:
return self.data_byte_size
return self.source_samples_per_packet * self.data_width_bytes
@staticmethod
def _validate_config(config):
if not isinstance(config, dict):
raise Exception("Invalid Configuration")
if config.get("column_location", None) is None:
raise Exception("Invalid Configuration: no column_location")
if config.get("sample_rate", None) is None:
raise Exception("Invalid Configuration: no sample_rate")
if config.get("samples_per_packet", None) is None:
raise Exception("Invalid Configuration: no samples_per_packet")
print("Found configuration:", config)
return config
@staticmethod
def _validate_results_data(data):
try:
tmp = json.loads(data)
if isinstance(tmp, dict) and tmp:
return True
except Exception as e:
print(e)
return False
def is_recording(self):
return self.recording
def is_streaming(self):
return self.streaming
def list_available_devices(self):
return []
def _send_subscribe(self):
pass
def read_config(self):
""" read the config from the device and set the properties of the object """
print("Reader: reading device config")
config = self.read_device_config()
self.source_samples_per_packet = config.get("samples_per_packet", None)
self.sample_rate = config.get("sample_rate", None)
self.config_columns = config.get("column_location", None)
self.data_type = config.get("data_type", "int16")
return config
def set_app_config(self, config):
config["DATA_SOURCE"] = self.name
config["CONFIG_COLUMNS"] = self.config_columns
config["CONFIG_SAMPLE_RATE"] = self.sample_rate
config["SOURCE_SAMPLES_PER_PACKET"] = self.source_samples_per_packet
config["DEVICE_ID"] = self.device_id
config["DATA_TYPE"] = self.data_type
def update_config(self, config):
""" update the objects local config values from the app cache """
self.samples_per_packet = config["CONFIG_SAMPLES_PER_PACKET"]
self.source_samples_per_packet = config["SOURCE_SAMPLES_PER_PACKET"]
self.sample_rate = config["CONFIG_SAMPLE_RATE"]
self.config_columns = config.get("CONFIG_COLUMNS")
self.class_map = config.get("CLASS_MAP")
self.data_type = config.get("DATA_TYPE", "int16")
def connect(self):
if self._thread is None:
"Assume if there is a thread, we are already connected"
self.buffer = CircularBufferQueue(
self._lock, buffer_size=self.packet_buffer_size
)
self.rbuffer = CircularResultsBufferQueue(self._lock, buffer_size=1)
print("Base: Sending subscribe to source")
self._send_subscribe()
time.sleep(1)
self.buffer.reset_buffer()
print("Base: Setting up thread to read source")
self._thread = threading.Thread(target=self._read_source)
self._thread.start()
time.sleep(1)
else:
print("Base: Thread Already Started!")
def disconnect(self):
self.streaming = False
self._thread = None
self._record_thread = None
self.recording = False
self.buffer.reset_buffer()
self.rbuffer.reset_buffer()
def record_start(self, filename):
if not self.streaming:
raise Exception("Must start streaming before begging to record!")
if self.recording:
raise Exception("Only a single recording can occur at one time")
if filename is None:
raise Exception("Invalid Filename")
if not os.path.exists(os.path.dirname(filename)):
print(
"Base: File directory does not exist, recording to data directory in gateway location."
)
if not os.path.exists("./data"):
os.mkdir("./data")
filename = os.path.join("./data", os.path.basename(filename))
self.recording = True
self._record_thread = threading.Thread(
target=self._record_data, kwargs={"filename": filename}
)
self._record_thread.start()
def record_stop(self, filename=None):
if self.recording != True:
raise Exception("Not currently recording")
self._record_thread = None
self.recording = False
return True
def convert_data_to_list(self, data):
num_samples = len(data) // self.data_byte_size
tmp = struct.unpack(self.data_type_str * num_samples, data)
tmp = [x * self.scaling_factor for x in tmp]
for index in range(self.source_samples_per_packet):
yield tmp[index * self.data_width : (index + 1) * self.data_width]
def convert_data_to_int16(self, data):
num_samples = len(data) // self.data_byte_size
tmp = struct.unpack(self.data_type_str * num_samples, data)
sample_data = bytearray(num_samples * 2)
for index in range(num_samples):
# print(tmp[index])
struct.pack_into(
"<" + "h", sample_data, index * 2, int(tmp[index] * self.scaling_factor)
)
return bytes(sample_data)
def get_sml_model_obj(self):
sml = SMLRunner(os.path.join(self.sml_library_path))
sml.init_model()
print("Model initialized")
return sml
def execute_run_sml_model(self, sml, data):
for data_chunk in self.convert_data_to_list(data):
ret = sml.run_model(data_chunk, 0)
if ret >= 0:
print(
self._map_classification({"ModelNumber": 0, "Classification": ret})
)
sml.reset_model(0)
def _map_classification(self, results):
if self.model_json:
results["Classification"] = self.model_json["ModelDescriptions"][
results["ModelNumber"]
]["ClassMaps"][str(results["Classification"])]
elif self.class_map:
results["Classification"] = self.class_map.get(
results["Classification"], results["Classification"]
)
return results
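# Illustrative sketch (not part of the original module): shows how
# BaseReader._map_classification() resolves a raw classification id using the
# CLASS_MAP from the app config. The config values and result dict below are
# hypothetical.
def _example_map_classification():
    config = {
        "CONFIG_SAMPLES_PER_PACKET": 10,
        "CLASS_MAP": {1: "walking", 2: "running"},
        "MODEL_JSON": None,
        "LOOP": False,
    }
    reader = BaseReader(config)
    # A result as it might arrive from the device before mapping
    raw = {"ModelNumber": 0, "Classification": 1}
    return reader._map_classification(raw)  # -> {"ModelNumber": 0, "Classification": "walking"}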
class BaseStreamReaderMixin(object):
def read_data(self):
""" Generator to read the data stream out of the buffer """
print("StreamReader: New stream reader connected")
# name = random.randint(0, 100)
if self._thread:
pass
else:
print("StreamReader: establishing a connectiong to the device.")
self.connect()
self.streaming = True
index = self.buffer.get_latest_buffer()
while self.streaming:
if index is None:
index = self.buffer.get_latest_buffer()
time.sleep(0.1)
continue
if self.buffer.is_buffer_full(index):
data = self.buffer.read_buffer(index)
index = self.buffer.get_next_index(index)
if self.convert_to_int16 and self.data_type_str == "f":
data = self.convert_data_to_int16(data)
if data:
yield data
time.sleep(0.001)
print("StreamReader: Stream Ended")
def _record_data(self, filename):
with open(filename + ".csv", "w", newline="") as csvfile:
datawriter = csv.writer(csvfile, delimiter=",")
print("StreamReader: Recording stream to ", filename + ".csv")
datawriter.writerow(
[
x[0]
for x in sorted(
self.config_columns.items(), key=lambda item: item[1]
)
]
)
struct_info = self.data_type_str * self.data_width
data_reader = self.read_data()
while self.recording:
data = next(data_reader)
if data:
for row_index in range(len(data) // (self.data_width_bytes)):
buff_index = row_index * self.data_width_bytes
datawriter.writerow(
struct.unpack(
struct_info,
data[buff_index : buff_index + self.data_width_bytes],
)
)
print("StreamReader: CSV recording thread finished")
class BaseResultReaderMixin(object):
def read_device_config(self):
print("ResultReader: reading device config")
return {"samples_per_packet": 1}
def read_data(self):
""" Genrator to read the result stream out of the buffer """
print("ResultReader: result read starting")
if self._thread:
pass
else:
print("sent connect")
self.connect()
index = self.rbuffer.get_latest_buffer()
while self.streaming:
if index is None:
index = self.rbuffer.get_latest_buffer()
time.sleep(0.1)
continue
if self.rbuffer.is_buffer_full(index):
data = self.rbuffer.read_buffer(index)
index = self.rbuffer.get_next_index(index)
for result in data:
if self._validate_results_data(result):
result = self._map_classification(json.loads(result))
result["timestap"] = time.time()
print(result)
yield json.dumps(result) + "\n"
else:
time.sleep(0.1)
print("ResultReader: Result stream ended")
def _record_data(self, filename):
with open(filename + ".csv", "w", newline="") as out:
print("ResultReader: Recording results to ", filename + ".csv")
data_reader = self.read_data()
while self.recording:
data = next(data_reader)
if data:
out.write(data)
print("ResultReader: Recording data finished")
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
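# Illustrative sketch (not part of the original module): ExecutionCounter wraps
# multiprocessing.Value counters so totals can be updated from worker
# processes; every property getter and setter acquires the underlying lock.
def _example_execution_counter_usage():
    counter = ExecutionCounter(total=3)
    counter.done += 1    # the read and the write each happen under the Value's lock
    counter.passed += 1
    counter.skipped_configs += 1
    return counter.done, counter.passed, counter.skipped_configs, counter.total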
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
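# Illustrative sketch (not part of the original module): shows how
# CMakeCacheEntry.from_line() parses typical CMakeCache.txt lines. The entry
# names and values below are hypothetical.
def _example_cmake_cache_entry_parsing():
    bool_entry = CMakeCacheEntry.from_line("CONFIG_FOO:BOOL=ON", 1)
    list_entry = CMakeCacheEntry.from_line("EXTRA_FLAGS:STRING=-Wall;-Werror", 2)
    comment = CMakeCacheEntry.from_line("// This is a comment", 3)
    # bool_entry.value == 1, list_entry.value == ['-Wall', '-Werror'], comment is None
    return bool_entry, list_entry, comment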
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
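# Illustrative sketch (not part of the original module): read-only use of
# CMakeCache against a generated build directory. The build path and variable
# names below are hypothetical placeholders.
def _example_cmake_cache_usage(build_dir="build"):
    cache = CMakeCache.from_file(os.path.join(build_dir, "CMakeCache.txt"))
    board = cache.get("CACHED_BOARD", default=None)    # scalar lookup with a default
    includes = cache.get_list("INCLUDE_DIRECTORIES")   # ';'-separated values come back as a list
    return board, includes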
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.state = state
self.duration = duration
def get_state(self):
ret = (self.state, self.duration)
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of both how newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja versions don't seem to pass SIGTERM down to the children
# so we need to use try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for d in self.suite.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or not (d.serial or d.serial_pty):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.suite.duts:
if d.serial == serial or d.serial_pty:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.suite.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug("Using serial device {}".format(serial_device))
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--snr")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
        # Sometimes a test instance hasn't been executed successfully and has
        # an empty results dictionary; fill the results with BLOCK so the
        # instance is still included in the final report.
if self.instance.results == {}:
for k in self.instance.testcase.cases:
self.instance.results[k] = 'BLOCK'
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
    Test cases emit special messages to the console as they run; we check
    for these to determine whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
        The guest virtual time in QEMU icount mode isn't host time; it is
        maintained by counting guest instructions, so we use the QEMU process
        execution time to approximate the time of the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
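    # The timeout compensation in _thread() below relies on this helper: if the
    # poll timed out but the QEMU process has consumed less CPU time than the
    # allotted timeout, the deadline is pushed back by the difference. A minimal
    # sketch of that decision, with illustrative numbers:
    #
    #   cpu_time = QEMUHandler._get_cpu_time(pid)   # e.g. 12.5 s of host CPU
    #   if cpu_time < timeout:                      # e.g. timeout == 60 s
    #       timeout_time = time.time() + (timeout - cpu_time)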
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
        # Disable internal buffering; we don't want read() or poll() to
        # ever block if there is data available
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
                            # There is a possibility that we polled nothing
                            # because the host did not schedule enough CPU time
                            # to the QEMU process during p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
        if pid:
            try:
                os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# twister to judge testing result by console output
is_timeout = True
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join()
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
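# Note on the QEMU FIFO naming used above: handle() passes self.fifo_fn (e.g.
# "<build_dir>/qemu-fifo") to the build system via QEMU_PIPE, and _thread()
# derives the two named pipes from it. A minimal sketch, with an illustrative
# build directory:
#
#   fifo_fn  = os.path.join("twister-out/native_posix/sample.test", "qemu-fifo")
#   fifo_in  = fifo_fn + ".in"    # written by the handler, read by QEMU
#   fifo_out = fifo_fn + ".out"   # written by QEMU, read by the handler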
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
            be specified; if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
                    whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
                    # but some keys are handled in an ad-hoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
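# How "common" and per-test keys merge in get_test(), sketched with a
# hypothetical testcase.yaml:
#
#   common:             tags: kernel     filter: CONFIG_A
#   tests: kernel.sem:  tags: sem        filter: CONFIG_B
#
# yields tags {"kernel", "sem"} (string concatenation, then cast to a set) and
# filter "(CONFIG_A) and (CONFIG_B)" per the ad-hoc handling above.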
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
            # Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return matches, warnings
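    # Illustrative input for scan_file(): given a source file containing
    #
    #   ztest_test_suite(mutex_complex, ztest_unit_test(test_mutex_lock));
    #   ztest_run_test_suite(mutex_complex);
    #
    # the regexes above would yield matches == ["mutex_lock"] (the "test_"
    # prefix is stripped) and warnings == None, assuming no #ifdef/#endif
    # appears between the suite declaration and ztest_run_test_suite().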
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise TwisterRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
        # Right now we only support building on Windows; running is still
        # work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
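    # A sketch of the overlay fragment create_overlay() writes to
    # <build_dir>/twister/testcase_extra.conf when coverage and ASAN are both
    # requested on a native platform (the exact contents depend on the options
    # passed in):
    #
    #   CONFIG_COVERAGE=y
    #   CONFIG_COVERAGE_DUMP=y
    #   CONFIG_ASAN=y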
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Wa,--fatal-warnings"
gen_defines_args = "--err-on-deprecated-properties"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS="{cflags}"',
            f'-DEXTRA_AFLAGS="{aflags}"',
f'-DEXTRA_LDFLAGS="{ldflags}"',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode}
return results
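# A minimal usage sketch for CMake.run_cmake_script(), mirroring how
# get_toolchain() calls it further below (the script path is the real one under
# ZEPHYR_BASE; "FORMAT=json" is the variable that script expects):
#
#   script = os.path.join(ZEPHYR_BASE, "cmake", "verify-toolchain.cmake")
#   res = CMake.run_cmake_script([script, "FORMAT=json"])
#   if res["returncode"] == 0:
#       print(res["stdout"].decode())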
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.pid_fn = os.path.join(instance.build_dir, "mdb.pid")
instance.handler.call_west_flash = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
results.skipped_runtime += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
inst = res.get("instance", None)
if inst and inst.status == "skipped":
results.skipped_runtime += 1
if res.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.suite = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total - results.skipped_configs
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout"]:
if instance.status == "error":
results.error += 1
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done, total_tests_width, total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done) / total_to_do) * 100)
skipped = results.skipped_configs + results.skipped_runtime
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if skipped > 0 else Fore.RESET,
skipped,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
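    # Illustrative effect of the OVERLAY_CONFIG merging above: if extra_args
    # contains 'OVERLAY_CONFIG=overlay-a.conf' and a generated
    # twister/testcase_extra.conf exists, the final argument becomes
    #
    #   OVERLAY_CONFIG="overlay-a.conf <build_dir>/twister/testcase_extra.conf"
    #
    # (the file names here are hypothetical).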
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release",
"twister_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
self.pipeline = None
self.version = "NA"
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None, initial=False):
results.skipped_configs = 0
results.skipped_cases = 0
for instance in self.instances.values():
if initial:
results.cases += len(instance.testcase.cases)
if instance.status == 'skipped':
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total - results.skipped_configs,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version)
self.csv_report(filename + ".csv")
if json_report:
self.json_report(filename + ".json", append=only_failed, version=self.version)
if platform_reports:
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
return len(self.testcases)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[], filter_platform=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testcases[test], platform, self.outdir)
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
if tc.build_on_all and not platform_filter:
platform_scope = self.platforms
elif tc.integration_platforms and self.integration:
platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if tc.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
integration = self.integration and tc.integration_platforms
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all and not integration:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
for instance in self.discards:
instance.reason = self.discards[instance]
instance.status = "skipped"
instance.fill_results_by_status()
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False):
for instance in self.instances.values():
if build_only:
instance.run = False
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
if instance.status not in ['passed', 'skipped', 'error']:
logger.debug(f"adding {instance.name}")
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
# FIXME: This needs to move out.
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(self.calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
return results
def discard_report(self, filename):
try:
if not self.discards:
raise TwisterRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
total = 0
fails = passes = errors = skips = 0
if platform:
selected = [platform]
logger.info(f"Writing target report for {platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
if instance.status:
logger.error(f"{instance.name}: Unknown status {instance.status}")
else:
logger.error(f"{instance.name}: No status")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSProperties, 'property', name="version", value=version)
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSProperties, 'property', name="version", value=version)
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(log_root, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(log_root, "build.log")
hl = os.path.join(log_root, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def json_report(self, filename, append=False, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
selected = self.selected_platforms
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
json_data = {}
if os.path.exists(filename) and append:
with open(filename, 'r') as json_file:
json_data = json.load(json_file)
suites = json_data.get("testsuites", [])
if suites:
suite = suites[0]
testcases = suite.get("testcases", [])
else:
suite = {}
testcases = []
for p in selected:
inst = self.get_platform_instances(p)
for _, instance in inst.items():
testcase = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
for k in instance.results.keys():
testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases ))
testcase = {"testcase": k,
"arch": instance.platform.arch,
"platform": p,
}
if instance.results[k] in ["PASS"]:
testcase["status"] = "passed"
if instance.handler:
testcase["execution_time"] = handler_time
if ram_size:
testcase["ram_size"] = ram_size
if rom_size:
testcase["rom_size"] = rom_size
elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout"]:
testcase["status"] = "failed"
testcase["reason"] = instance.reason
testcase["execution_time"] = handler_time
if os.path.exists(handler_log):
testcase["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
testcase["device_log"] = self.process_log(device_log)
else:
testcase["build_log"] = self.process_log(build_log)
else:
testcase["status"] = "skipped"
testcase["reason"] = instance.reason
testcases.append(testcase)
suites = [ {"testcases": testcases} ]
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
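# Illustrative sketch (not part of the original script) of how a caller is expected
# to wire up the factory; "outdir" and the "gcov" executable name are assumptions
# made for the example only.
#   coverage_tool = CoverageTool.factory("lcov")
#   coverage_tool.gcov_tool = "gcov"
#   coverage_tool.base_dir = ZEPHYR_BASE
#   coverage_tool.add_ignore_file('generated')
#   coverage_tool.add_ignore_directory('tests')
#   coverage_tool.generate(outdir)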
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % intput_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# if kobject_hash is given for coverage gcovr fails
# hence skipping it problem only in gcovr v4.1
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, values):
    tuple_list = [(prefix, item) for item in values]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty):
device = DUT(platform=platform, connected=True, pre_script=pre_script)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
serial = dut.get('serial')
product = dut.get('product')
fixtures = dut.get('fixtures', [])
new_dut = DUT(platform=platform,
product=product,
runner=runner,
id=id,
serial=serial,
connected=serial is not None,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
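# Purely illustrative (hypothetical device names): the resulting map looks like
#   {'/dev/ttyACM0': '/dev/serial/by-id/usb-SEGGER_J-Link_000123456789-if00'}
# so the scan can record the stable by-id path instead of the volatile ttyACM node.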
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown')
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
hdfs_utils.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""hdfs_utils.py will move to fluid/incubate/fleet/utils/hdfs.py"""
import os
import sys
import subprocess
import multiprocessing
from datetime import datetime
import re
import copy
import errno
import logging
from paddle.fluid.log_helper import get_logger
__all__ = ["HDFSClient", "multi_download", "multi_upload"]
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
class HDFSClient(object):
r"""
A wrapper around the Hadoop command line client for common HDFS operations.
Args:
    hadoop_home (string): path of the Hadoop client installation
    configs (dict): Hadoop configuration; it must contain the keys
        "fs.default.name" and "hadoop.job.ugi"
Examples:
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.ls("/user/com/train-25")
files = client.lsr("/user/com/train-25/models")
"""
def __init__(self, hadoop_home, configs):
self.pre_commands = []
hadoop_bin = '%s/bin/hadoop' % hadoop_home
self.pre_commands.append(hadoop_bin)
dfs = 'fs'
self.pre_commands.append(dfs)
for k, v in configs.items():
config_command = '-D%s=%s' % (k, v)
self.pre_commands.append(config_command)
def __run_hdfs_cmd(self, commands, retry_times=5):
whole_commands = copy.deepcopy(self.pre_commands)
whole_commands.extend(commands)
print('Running system command: {0}'.format(' '.join(whole_commands)))
ret_code = 0
ret_out = None
ret_err = None
whole_commands = " ".join(whole_commands)
for x in range(retry_times + 1):
proc = subprocess.Popen(
whole_commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
(output, errors) = proc.communicate()
ret_code, ret_out, ret_err = proc.returncode, output, errors
if ret_code:
_logger.warn(
    'Times: %d, Error running command: %s. Return code: %d, Error: %s'
    % (x, whole_commands, proc.returncode, errors))
else:
break
return ret_code, ret_out, ret_err
def upload(self, hdfs_path, local_path, overwrite=False, retry_times=5):
"""
upload the local file to hdfs
Args:
hdfs_path(str): the hdfs file path
local_path(str): the local file path
overwrite(bool|None): will overwrite the file on HDFS or not
retry_times(int|5): retry times
Returns:
True or False
"""
assert hdfs_path is not None
assert local_path is not None and os.path.exists(local_path)
if os.path.isdir(local_path):
_logger.warn(
    "The local path: {} is a directory; uploading directories is not supported yet, returning".
    format(local_path))
return False
base = os.path.basename(local_path)
if not self.is_exist(hdfs_path):
self.makedirs(hdfs_path)
else:
if self.is_exist(os.path.join(hdfs_path, base)):
if overwrite:
    _logger.info(
        "The HDFS path: {} already exists and overwrite is True, deleting it".
        format(os.path.join(hdfs_path, base)))
    self.delete(os.path.join(hdfs_path, base))
else:
    _logger.error(
        "The HDFS path: {} already exists and overwrite is False, returning".
        format(os.path.join(hdfs_path, base)))
    return False
put_commands = ["-put", local_path, hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(put_commands,
retry_times)
if returncode:
_logger.error("Put local path: {} to HDFS path: {} failed".format(
local_path, hdfs_path))
return False
else:
_logger.info("Put local path: {} to HDFS path: {} successfully".
format(local_path, hdfs_path))
return True
def download(self, hdfs_path, local_path, overwrite=False, unzip=False):
"""
download file from HDFS
Args:
hdfs_path(str): the hdfs file path
local_path(str): the local file path
overwrite(bool|None): will overwrite the file on HDFS or not
unzip(bool|False): if the download file is compressed by zip, unzip it or not.
Returns:
True or False
"""
_logger.info('Downloading %r to %r.', hdfs_path, local_path)
if not self.is_exist(hdfs_path):
print("HDFS path: {} do not exist".format(hdfs_path))
return False
if self.is_dir(hdfs_path):
    _logger.error(
        "The HDFS path: {} is a directory; downloading directories is not supported yet, returning".
        format(hdfs_path))
    return False
if os.path.exists(local_path):
base = os.path.basename(hdfs_path)
local_file = os.path.join(local_path, base)
if os.path.exists(local_file):
if overwrite:
os.remove(local_file)
else:
_logger.error(
"The Local path: {} is exist and overwrite is False, return".
format(local_file))
return False
self.make_local_dirs(local_path)
download_commands = ["-get", hdfs_path, local_path]
returncode, output, errors = self.__run_hdfs_cmd(download_commands)
if returncode:
_logger.error("Get local path: {} from HDFS path: {} failed".format(
local_path, hdfs_path))
return False
else:
_logger.info("Get local path: {} from HDFS path: {} successfully".
format(local_path, hdfs_path))
return True
def is_exist(self, hdfs_path=None):
"""
whether the remote HDFS path exists
Args:
hdfs_path(str): the hdfs file path
Returns:
True or False
"""
exist_cmd = ['-test', '-e', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
exist_cmd, retry_times=1)
if returncode:
    # a nonzero return from `-test -e` just means the path is absent
    _logger.info("HDFS path does not exist: {}".format(hdfs_path))
    return False
else:
    _logger.info("HDFS path exists: {}".format(hdfs_path))
    return True
def is_dir(self, hdfs_path=None):
"""
whether the remote HDFS path is directory
Args:
hdfs_path(str): the hdfs file path
Returns:
True or False
"""
if not self.is_exist(hdfs_path):
return False
dir_cmd = ['-test', '-d', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(dir_cmd, retry_times=1)
if returncode:
_logger.error("HDFS path: {} failed is not a directory".format(
hdfs_path))
return False
else:
_logger.info("HDFS path: {} successfully is a directory".format(
hdfs_path))
return True
def delete(self, hdfs_path):
"""
Remove a file or directory from HDFS.
whether the remote HDFS path exists
Args:
hdfs_path: HDFS path.
Returns:
True or False
This function returns `True` if the deletion was successful and `False` if
no file or directory previously existed at `hdfs_path`.
"""
_logger.info('Deleting %r.', hdfs_path)
if not self.is_exist(hdfs_path):
_logger.warn("HDFS path: {} do not exist".format(hdfs_path))
return True
if self.is_dir(hdfs_path):
del_cmd = ['-rmr', hdfs_path]
else:
del_cmd = ['-rm', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(del_cmd, retry_times=0)
if returncode:
_logger.error("HDFS path: {} delete files failure".format(
hdfs_path))
return False
else:
_logger.info("HDFS path: {} delete files successfully".format(
hdfs_path))
return True
def rename(self, hdfs_src_path, hdfs_dst_path, overwrite=False):
"""
Move a file or folder on HDFS.
Args:
hdfs_src_path(str): HDFS source path.
hdfs_dst_path(str): HDFS destination path.
overwrite(bool|False): If the destination path already exists and overwrite is False, will return False.
Returns:
True or False
"""
assert hdfs_src_path is not None
assert hdfs_dst_path is not None
if not self.is_exist(hdfs_src_path):
    _logger.info("HDFS source path does not exist: {}".format(hdfs_src_path))
    return False
if self.is_exist(hdfs_dst_path) and not overwrite:
    _logger.error("HDFS destination path already exists: {} and overwrite=False".format(
        hdfs_dst_path))
    return False
rename_command = ['-mv', hdfs_src_path, hdfs_dst_path]
returncode, output, errors = self.__run_hdfs_cmd(
rename_command, retry_times=1)
if returncode:
_logger.error("HDFS rename path: {} to {} failed".format(
hdfs_src_path, hdfs_dst_path))
return False
else:
_logger.info("HDFS rename path: {} to {} successfully".format(
hdfs_src_path, hdfs_dst_path))
return True
@staticmethod
def make_local_dirs(local_path):
"""
create a directory local, is same to mkdir
Args:
local_path: local path that wants to create a directory.
"""
try:
os.makedirs(local_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def makedirs(self, hdfs_path):
"""
Create a remote directory, recursively if necessary.
Args:
hdfs_path(str): Remote path. Intermediate directories will be created appropriately.
Returns:
True or False
"""
_logger.info('Creating directories to %r.', hdfs_path)
assert hdfs_path is not None
if self.is_exist(hdfs_path):
    _logger.info("HDFS path already exists: {}".format(hdfs_path))
    return True
mkdirs_commands = ['-mkdir', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
mkdirs_commands, retry_times=1)
if returncode:
_logger.error("HDFS mkdir path: {} failed".format(hdfs_path))
return False
else:
_logger.error("HDFS mkdir path: {} successfully".format(hdfs_path))
return True
def ls(self, hdfs_path):
"""
ls directory contents about HDFS hdfs_path
Args:
hdfs_path(str): Remote HDFS path will be ls.
Returns:
List: a contents list about hdfs_path.
"""
assert hdfs_path is not None
if not self.is_exist(hdfs_path):
return []
ls_commands = ['-ls', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
ls_commands, retry_times=1)
if returncode:
_logger.error("HDFS list path: {} failed".format(hdfs_path))
return []
else:
_logger.info("HDFS list path: {} successfully".format(hdfs_path))
ret_lines = []
regex = re.compile(r'\s+')
out_lines = output.strip().split("\n")
for line in out_lines:
re_line = regex.split(line)
if len(re_line) == 8:
ret_lines.append(re_line[7])
return ret_lines
def lsr(self, hdfs_path, only_file=True, sort=True):
"""
list directory contents about HDFS hdfs_path recursively
Args:
hdfs_path(str): Remote HDFS path.
only_file(bool|True): will discard folders.
sort(bool|True): will be sorted by create time.
Returns:
List: a contents list about hdfs_path.
"""
def sort_key(entry):
    # entry is a (path, "YYYY-MM-DD HH:MM") tuple; sort by its timestamp
    return datetime.strptime(entry[1], '%Y-%m-%d %H:%M')
assert hdfs_path is not None
if not self.is_exist(hdfs_path):
return []
ls_commands = ['-lsr', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
ls_commands, retry_times=1)
if returncode:
_logger.error("HDFS list all files: {} failed".format(hdfs_path))
return []
else:
_logger.info("HDFS list all files: {} successfully".format(
hdfs_path))
lines = []
regex = re.compile(r'\s+')
out_lines = output.strip().split("\n")
for line in out_lines:
re_line = regex.split(line)
if len(re_line) == 8:
if only_file and re_line[0][0] == "d":
continue
else:
lines.append(
(re_line[7], re_line[5] + " " + re_line[6]))
if sort:
    # sorted() has no cmp argument in Python 3 and its result was discarded; sort in place
    lines.sort(key=sort_key)
ret_lines = [ret[0] for ret in lines]
return ret_lines
def multi_download(client,
hdfs_path,
local_path,
trainer_id,
trainers,
multi_processes=5):
"""
Download files from HDFS using multi process.
Args:
client(HDFSClient): instance of HDFSClient
hdfs_path(str): path on hdfs
local_path(str): path on local
trainer_id(int): current trainer id
trainers(int): all trainers number
multi_processes(int|5): the download data process at the same time, default=5
Returns:
List:
Download files in local folder.
"""
def __subprocess_download(datas):
for data in datas:
re_path = os.path.relpath(os.path.dirname(data), hdfs_path)
if re_path == os.curdir:
sub_local_re_path = local_path
else:
sub_local_re_path = os.path.join(local_path, re_path)
client.download(data, sub_local_re_path)
assert isinstance(client, HDFSClient)
client.make_local_dirs(local_path)
_logger.info("Make local dir {} successfully".format(local_path))
all_need_download = client.lsr(hdfs_path, sort=True)
need_download = all_need_download[trainer_id::trainers]
_logger.info("Get {} files From all {} files need to be download from {}".
format(len(need_download), len(all_need_download), hdfs_path))
_logger.info("Start {} multi process to download datas".format(
multi_processes))
procs = []
for i in range(multi_processes):
process_datas = need_download[i::multi_processes]
p = multiprocessing.Process(
target=__subprocess_download, args=(process_datas, ))
procs.append(p)
p.start()
# complete the processes
for proc in procs:
proc.join()
_logger.info("Finish {} multi process to download datas".format(
multi_processes))
local_downloads = []
for data in need_download:
data_name = os.path.basename(data)
re_path = os.path.relpath(os.path.dirname(data), hdfs_path)
if re_path == os.curdir:
local_re_path = os.path.join(local_path, data_name)
else:
local_re_path = os.path.join(local_path, re_path, data_name)
local_downloads.append(local_re_path)
return local_downloads
def getfilelist(path):
rlist = []
for dir, folder, file in os.walk(path):
for i in file:
t = os.path.join(dir, i)
rlist.append(t)
for r in rlist:
print(r)
def multi_upload(client,
hdfs_path,
local_path,
multi_processes=5,
overwrite=False,
sync=True):
"""
Upload files to HDFS using multi process.
Args:
client(HDFSClient): instance of HDFSClient
hdfs_path(str): path on hdfs
local_path(str): path on local
multi_processes(int|5): the upload data process at the same time, default=5
overwrite(bool|False): will overwrite file on HDFS or not
sync(bool|True): upload files sync or not.
Returns:
None
"""
def __subprocess_upload(datas):
for data in datas:
re_path = os.path.relpath(os.path.dirname(data), local_path)
hdfs_re_path = os.path.join(hdfs_path, re_path)
client.upload(hdfs_re_path, data, overwrite, retry_times=5)
def get_local_files(path):
rlist = []
if not os.path.isdir(path):
return rlist
for dirname, folder, files in os.walk(path):
for i in files:
t = os.path.join(dirname, i)
rlist.append(t)
return rlist
assert isinstance(client, HDFSClient)
all_files = get_local_files(local_path)
if not all_files:
_logger.info("there are nothing need to upload, exit")
return
_logger.info("Start {} multi process to upload datas".format(
multi_processes))
procs = []
for i in range(multi_processes):
process_datas = all_files[i::multi_processes]
p = multiprocessing.Process(
target=__subprocess_upload, args=(process_datas, ))
procs.append(p)
p.start()
# complete the processes
for proc in procs:
proc.join()
_logger.info("Finish {} multi process to upload datas".format(
multi_processes))
if __name__ == "__main__":
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.ls("/user/com/train-25")
files = client.lsr("/user/com/train-25/models")
downloads = multi_download(
client,
"/user/com/train-25/model",
"/home/xx/data1",
1,
5,
multi_processes=5)
multi_upload(client, "/user/com/train-25/model", "/home/xx/data1")
turn_classification.py
import cv2
import threading
import tensorflow as tf
import numpy as np
import time
import capturer
from utils.circularBuffer import CircularBuffer
labels = ['Left Turn', 'No Turn', 'Right Turn']
model_path = "./turn_classification/turn_classification_model_final_v1.h5"
readings_buffer_size = 20
image_preprocessing_dimens = (100, 100)
detection_threshold = 0.5
class TurnClassification:
def __init__(self):
self.model = tf.keras.models.load_model(model_path)
self.readings_buffer = CircularBuffer(readings_buffer_size, noneOverridePercent=0.5)
self.images_queue = CircularBuffer(1)
self.classifier_queue = CircularBuffer(1)
threading.Thread(target=self.classification_starter).start()
def capture_processing(self):
while True:
try:
frame = capturer.get_images().get_last()
if frame is not None:
preprocessed_frame = cv2.resize(frame, image_preprocessing_dimens, interpolation=cv2.INTER_LINEAR)
self.images_queue.add(np.expand_dims(preprocessed_frame, 0))
except Exception as e:
print("Capturing Not Working", e)
def classification_starter(self):
threading.Thread(target=self.capture_processing).start()
while True:
try:
self.perform_inference(self.images_queue.get_last())
except Exception as e:
print("Classification Not Working", e)
def perform_inference(self, image):
feedforward_result = self.model.predict(image).tolist()[0]
self.readings_buffer.add(None if feedforward_result is None or max(feedforward_result) < detection_threshold else feedforward_result)
averaged_result = self.readings_buffer.mean()
# print(averaged_result)
self.classifier_queue.add("No Turn" if averaged_result is None else labels[np.argmax(averaged_result)])
def get_inference(self):
return self.classifier_queue.get_last()
test_context.py
import pytest
import time
from threading import Thread
from threading import Event
from p4client.p4grpc import P4RuntimeGRPC
from packettest.test_context import TestContext
from simple_switch.simple_switch_runner import make_switch
merged_config = {
'switch_name': 'meow',
'network_name': 'meow_net',
'device_id': 0,
'election_high': 0,
'election_low': 1,
'grpc_port': 9559
}
def make_bmv_context(config, compiled, p4info, control_function=None,
configure=None, log_level='info'):
merged_config.update(config)
@pytest.fixture(scope='module')
def context_():
bmv2 = make_switch(config,
merged_config.get('switch_name'),
merged_config.get('network_name'),
merged_config.get('grpc_port'))
bmv2.launch(log_level=log_level)
continue_event = Event()
shutdown_event = Event()
def wait_start_and_log_stream():
''' Start reading the logs and trigger an event when the switch
application has started the thrift server to test for readiness.
While not perfect as it isn't testing the gRPC interface, it is a
good (read: only) proxy for doing so.'''
for line in bmv2.stream():
line = line.decode('utf-8').strip()
if 'Thrift server was started' in line:
continue_event.set()
print(line)
logs = Thread(target=wait_start_and_log_stream)
logs.start()
continue_event.wait()
grpc_port = merged_config.get('grpc_port')
controller = P4RuntimeGRPC(
host=f'localhost:{grpc_port}',
device_id=merged_config.get('device_id'),
election_high=merged_config.get('election_high'),
election_low=merged_config.get('election_low')
)
controller.master_arbitration_update()
info_data = open(p4info, 'rb').read()
bin_data = open(compiled, 'rb').read()
controller._set_info(info_data)
controller.configure_forwarding_pipeline(bin_data)
if configure is not None:
configure(controller)
if control_function is not None:
controller_thread = Thread(target=control_function, args=[
controller,
shutdown_event])
controller_thread.start()
time.sleep(1)
yield TestContext()
if controller is not None:
controller.tear_down_stream()
bmv2.kill()
shutdown_event.set()
if control_function is not None:
    controller_thread.join()
logs.join()
return context_
train_navigation_agent.py
# Trains the navigation module using random start and end points.
import numpy as np
import pdb
import os
import random
import threading
import tensorflow as tf
import time
from networks.free_space_network import FreeSpaceNetwork
from supervised.sequence_generator import SequenceGenerator
from utils import tf_util
import constants
data_buffer = []
data_counts = np.full(constants.REPLAY_BUFFER_SIZE, 9999)
os.environ["CUDA_VISIBLE_DEVICES"] = str(constants.GPU_ID)
def run():
try:
with tf.variable_scope('nav_global_network'):
network = FreeSpaceNetwork(constants.GRU_SIZE, constants.BATCH_SIZE, constants.NUM_UNROLLS)
network.create_net()
training_step = network.training_op
with tf.variable_scope('loss'):
loss_summary_op = tf.summary.merge([
tf.summary.scalar('loss', network.loss),
])
summary_full = tf.summary.merge_all()
conv_var_list = [v for v in tf.trainable_variables() if 'conv' in v.name and 'weight' in v.name and
(v.get_shape().as_list()[0] != 1 or v.get_shape().as_list()[1] != 1)]
for var in conv_var_list:
tf_util.conv_variable_summaries(var, scope=var.name.replace('/', '_')[:-2])
summary_with_images = tf.summary.merge_all()
# prepare session
sess = tf_util.Session()
seq_inds = np.zeros((constants.BATCH_SIZE, 2), dtype=np.int32)
sequence_generators = []
for thread_index in range(constants.PARALLEL_SIZE):
gpus = str(constants.GPU_ID).split(',')
sequence_generator = SequenceGenerator(sess)
sequence_generators.append(sequence_generator)
sess.run(tf.global_variables_initializer())
if not (constants.DEBUG or constants.DRAWING):
from utils import py_util
time_str = py_util.get_time_str()
summary_writer = tf.summary.FileWriter(os.path.join(constants.LOG_FILE, time_str), sess.graph)
else:
summary_writer = None
saver = tf.train.Saver(max_to_keep=3)
# init or load checkpoint
start_it = tf_util.restore_from_dir(sess, constants.CHECKPOINT_DIR)
sess.graph.finalize()
data_lock = threading.Lock()
def load_new_data(thread_index):
global data_buffer
global data_counts
sequence_generator = sequence_generators[thread_index]
counter = 0
while True:
while not (len(data_buffer) < constants.REPLAY_BUFFER_SIZE or np.max(data_counts) > 0):
time.sleep(1)
counter += 1
if constants.DEBUG:
print('\nThread %d' % thread_index)
new_data, bounds, goal_pose = sequence_generator.generate_episode()
new_data = {key : ([new_data[ii][key] for ii in range(len(new_data))]) for key in new_data[0]}
new_data['goal_pose'] = goal_pose
new_data['memory'] = np.zeros(
(constants.SPATIAL_MAP_HEIGHT, constants.SPATIAL_MAP_WIDTH, constants.MEMORY_SIZE))
new_data['gru_state'] = np.zeros(constants.GRU_SIZE)
if constants.DRAWING:
new_data['debug_images'] = sequence_generator.debug_images
data_lock.acquire()
if len(data_buffer) < constants.REPLAY_BUFFER_SIZE:
data_counts[len(data_buffer)] = 0
data_buffer.append(new_data)
counts = data_counts[:len(data_buffer)]
if counter % 10 == 0:
print('Buffer size %d Num used %d Max used amount %d' % (len(data_buffer), len(counts[counts > 0]), np.max(counts)))
else:
max_count_ind = np.argmax(data_counts)
data_buffer[max_count_ind] = new_data
data_counts[max_count_ind] = 0
if counter % 10 == 0:
print('Num used %d Max used amount %d' % (len(data_counts[data_counts > 0]), np.max(data_counts)))
data_lock.release()
threads = []
for i in range(constants.PARALLEL_SIZE):
load_data_thread = threading.Thread(target=load_new_data, args=(i,))
load_data_thread.daemon = True
load_data_thread.start()
threads.append(load_data_thread)
time.sleep(1)
sequences = [None] * constants.BATCH_SIZE
curr_it = 0
dataTimeTotal = 0.00001
solverTimeTotal = 0.00001
summaryTimeTotal = 0.00001
totalTimeTotal = 0.00001
chosen_inds = set()
loc_to_chosen_ind = {}
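# Main training loop. Each iteration assembles a batch of NUM_UNROLLS-long
# sub-sequences: when a slot's sequence is exhausted (seq_inds[bb, 0] ==
# seq_inds[bb, 1]) a fresh episode is sampled from data_buffer under
# data_lock, its usage count in data_counts is bumped, and its recurrent
# state (memory / gru_state) is reset; otherwise the slot continues from
# where the previous unroll stopped and carries the state forward.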
for iteration in range(start_it, constants.MAX_TIME_STEP):
if iteration == start_it or iteration % 10 == 1:
currentTimeStart = time.time()
tStart = time.time()
batch_data = []
batch_action = []
batch_memory = []
batch_gru_state = []
batch_labels = []
batch_pose = []
batch_mask = []
batch_goal_pose = []
batch_pose_indicator = []
batch_possible_label = []
batch_debug_images = []
for bb in range(constants.BATCH_SIZE):
if seq_inds[bb, 0] == seq_inds[bb, 1]:
# Pick a new random sequence
pickable_inds = set(np.where(data_counts < 100)[0]) - chosen_inds
count_size = len(pickable_inds)
while count_size == 0:
pickable_inds = set(np.where(data_counts < 100)[0]) - chosen_inds
count_size = len(pickable_inds)
time.sleep(1)
random_ind = random.sample(pickable_inds, 1)[0]
data_lock.acquire()
sequences[bb] = data_buffer[random_ind]
goal_pose = sequences[bb]['goal_pose']
sequences[bb]['memory'] = np.zeros(
(constants.SPATIAL_MAP_HEIGHT, constants.SPATIAL_MAP_WIDTH, constants.MEMORY_SIZE))
sequences[bb]['gru_state'] = np.zeros(constants.GRU_SIZE)
data_counts[random_ind] += 1
if bb in loc_to_chosen_ind:
chosen_inds.remove(loc_to_chosen_ind[bb])
loc_to_chosen_ind[bb] = random_ind
chosen_inds.add(random_ind)
data_lock.release()
seq_inds[bb, 0] = 0
seq_inds[bb, 1] = len(sequences[bb]['color'])
data_len = min(constants.NUM_UNROLLS, seq_inds[bb, 1] - seq_inds[bb, 0])
ind0 = seq_inds[bb, 0]
ind1 = seq_inds[bb, 0] + data_len
data = sequences[bb]['color'][ind0:ind1]
action = sequences[bb]['action'][ind0:ind1]
labels = sequences[bb]['label'][ind0:ind1]
memory = sequences[bb]['memory'].copy()
gru_state = sequences[bb]['gru_state'].copy()
pose = sequences[bb]['pose'][ind0:ind1]
goal_pose = sequences[bb]['goal_pose']
mask = sequences[bb]['weight'][ind0:ind1]
pose_indicator = sequences[bb]['pose_indicator'][ind0:ind1]
possible_label = sequences[bb]['possible_label'][ind0:ind1]
if constants.DRAWING:
batch_debug_images.append(sequences[bb]['debug_images'][ind0:ind1])
if data_len < (constants.NUM_UNROLLS):
seq_inds[bb, :] = 0
data.extend([np.zeros_like(data[0]) for _ in range(constants.NUM_UNROLLS - data_len)])
action.extend([np.zeros_like(action[0]) for _ in range(constants.NUM_UNROLLS - data_len)])
labels.extend([np.zeros_like(labels[0]) for _ in range(constants.NUM_UNROLLS - data_len)])
pose.extend([pose[-1] for _ in range(constants.NUM_UNROLLS - data_len)])
mask.extend([np.zeros_like(mask[0]) for _ in range(constants.NUM_UNROLLS - data_len)])
pose_indicator.extend([np.zeros_like(pose_indicator[0]) for _ in range(constants.NUM_UNROLLS - data_len)])
possible_label.extend([np.zeros_like(possible_label[0]) for _ in range(constants.NUM_UNROLLS - data_len)])
else:
seq_inds[bb, 0] += constants.NUM_UNROLLS
batch_data.append(data)
batch_action.append(action)
batch_memory.append(memory)
batch_gru_state.append(gru_state)
batch_pose.append(pose)
batch_goal_pose.append(goal_pose)
batch_labels.append(labels)
batch_mask.append(mask)
batch_pose_indicator.append(pose_indicator)
batch_possible_label.append(possible_label)
feed_dict = {
network.image_placeholder: np.ascontiguousarray(batch_data),
network.action_placeholder: np.ascontiguousarray(batch_action),
network.gru_placeholder: np.ascontiguousarray(batch_gru_state),
network.pose_placeholder: np.ascontiguousarray(batch_pose),
network.goal_pose_placeholder: np.ascontiguousarray(batch_goal_pose),
network.labels_placeholder: np.ascontiguousarray(batch_labels)[...,np.newaxis],
network.mask_placeholder: np.ascontiguousarray(batch_mask),
network.pose_indicator_placeholder: np.ascontiguousarray(batch_pose_indicator),
network.possible_label_placeholder: np.ascontiguousarray(batch_possible_label),
network.memory_placeholders: np.ascontiguousarray(batch_memory),
}
dataTEnd = time.time()
summaryTime = 0
if constants.DEBUG or constants.DRAWING:
outputs = sess.run(
[training_step, network.loss, network.gru_state, network.patch_weights_sigm,
network.gru_outputs_full, network.is_possible_sigm,
network.pose_indicator_placeholder, network.terminal_patches,
network.gru_outputs],
feed_dict=feed_dict)
else:
if iteration == start_it + 10:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
outputs = sess.run([training_step, network.loss, network.gru_state,
summary_with_images, network.gru_outputs],
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
loss_summary = outputs[3]
summary_writer.add_run_metadata(run_metadata, 'step_%07d' % iteration)
summary_writer.add_summary(loss_summary, iteration)
summary_writer.flush()
elif iteration % 10 == 0:
if iteration % 100 == 0:
outputs = sess.run(
[training_step, network.loss, network.gru_state, summary_with_images, network.gru_outputs],
feed_dict=feed_dict)
elif iteration % 10 == 0:
outputs = sess.run(
[training_step, network.loss, network.gru_state, loss_summary_op, network.gru_outputs],
feed_dict=feed_dict)
loss_summary = outputs[3]
summaryTStart = time.time()
summary_writer.add_summary(loss_summary, iteration)
summary_writer.flush()
summaryTime = time.time() - summaryTStart
else:
outputs = sess.run([training_step, network.loss, network.gru_state, network.gru_outputs], feed_dict=feed_dict)
gru_state_out = outputs[2]
memory_out = outputs[-1]
for mm in range(constants.BATCH_SIZE):
sequences[mm]['memory'] = memory_out[mm,...]
sequences[mm]['gru_state'] = gru_state_out[mm,...]
loss = outputs[1]
solverTEnd = time.time()
if constants.DEBUG or constants.DRAWING:
# Look at outputs
patch_weights = outputs[3]
is_possible = outputs[5]
pose_indicator = outputs[6]
terminal_patches = outputs[7]
data_lock.acquire()
for bb in range(constants.BATCH_SIZE):
for tt in range(constants.NUM_UNROLLS):
if batch_mask[bb][tt] == 0:
break
if constants.DRAWING:
import cv2
import scipy.misc
from utils import drawing
curr_image = batch_data[bb][tt]
label = np.flipud(batch_labels[bb][tt])
debug_images = batch_debug_images[bb][tt]
color_image = debug_images['color']
state_image = debug_images['state_image']
label_memory_image = debug_images['label_memory'][:,:,0]
label_memory_image_class = np.argmax(debug_images['label_memory'][:,:,1:], axis=2)
label_memory_image_class[0,0] = constants.NUM_CLASSES
label_patch = debug_images['label']
print('Possible pred %.3f' % is_possible[bb,tt])
print('Possible label %.3f' % batch_possible_label[bb][tt])
patch = np.flipud(patch_weights[bb,tt,...])
patch_occupancy = patch[:,:,0]
print('occ', patch_occupancy)
print('label', label)
terminal_patch = np.flipud(np.sum(terminal_patches[bb,tt,...], axis=2))
image_list = [
debug_images['color'],
state_image,
debug_images['label_memory'][:,:,0],
debug_images['memory_map'][:,:,0],
label[:,:],
patch_occupancy,
np.flipud(pose_indicator[bb,tt]),
terminal_patch,
]
image = drawing.subplot(image_list, 4, 2, constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT)
cv2.imshow('image', image[:,:,::-1])
cv2.waitKey(0)
else:
pdb.set_trace()
data_lock.release()
if not (constants.DEBUG or constants.DRAWING) and (iteration % 500 == 0 or iteration == constants.MAX_TIME_STEP - 1):
saverTStart = time.time()
tf_util.save(saver, sess, constants.CHECKPOINT_DIR, iteration)
saverTEnd = time.time()
print('Saver: %.3f' % (saverTEnd - saverTStart))
curr_it += 1
dataTimeTotal += dataTEnd - tStart
summaryTimeTotal += summaryTime
solverTimeTotal += solverTEnd - dataTEnd - summaryTime
totalTimeTotal += time.time() - tStart
if iteration == start_it or (iteration) % 10 == 0:
print('Iteration: %d' % (iteration))
print('Loss: %.3f' % loss)
print('Data: %.3f' % (dataTimeTotal / curr_it))
print('Solver: %.3f' % (solverTimeTotal / curr_it))
print('Summary: %.3f' % (summaryTimeTotal / curr_it))
print('Total: %.3f' % (totalTimeTotal / curr_it))
print('Current: %.3f\n' % ((time.time() - currentTimeStart) / min(10, curr_it)))
except:
import traceback
traceback.print_exc()
finally:
# Save final model
if not (constants.DEBUG or constants.DRAWING):
tf_util.save(saver, sess, constants.CHECKPOINT_DIR, iteration)
if __name__ == '__main__':
run()
|
master.py
|
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
import os
import re
import time
import errno
import fnmatch
import signal
import shutil
import stat
import logging
import hashlib
try:
import pwd
except ImportError: # In case a Windows minion imports this module (pwd is unavailable on Windows)
pass
import getpass
import resource
import subprocess
import multiprocessing
import sys
# Import third party libs
import zmq
import yaml
from M2Crypto import RSA
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.key
import salt.fileserver
import salt.loader
import salt.daemons.masterapi
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
import salt.utils.schedule
from salt.utils.debug import enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
from salt.exceptions import MasterExit
from salt.utils.event import tagify
from salt.pillar import git_pillar
# Import halite libs
try:
import halite
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
try:
import systemd.daemon
HAS_PYTHON_SYSTEMD = True
except ImportError:
HAS_PYTHON_SYSTEMD = False
log = logging.getLogger(__name__)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
class SMaster(object):
'''
Create a simple salt-master, this will generate the top level master
'''
def __init__(self, opts):
'''
Create a salt master server instance
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
self.crypticle = self.__prep_crypticle()
def __prep_crypticle(self):
'''
Return the crypticle used for AES
'''
return salt.crypt.Crypticle(self.opts, self.opts['aes'])
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
return salt.daemons.masterapi.access_keys(self.opts)
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
'''
# Warn if ZMQ < 3.2
if not(hasattr(zmq, 'zmq_version_info')) or \
zmq.zmq_version_info() < (3, 2):
# PyZMQ 2.1.9 does not have zmq_version_info
log.warning('You have a version of ZMQ less than ZMQ 3.2! There '
'are known connection keep-alive issues with ZMQ < '
'3.2 which may result in loss of contact with '
'minions. Please upgrade your ZMQ!')
SMaster.__init__(self, opts)
def _clear_old_jobs(self):
'''
The clean old jobs function is the general passive maintenance process
controller for the Salt master. This is where any data that needs to
be cleanly maintained from the master is maintained.
'''
search = salt.search.Search(self.opts)
last = int(time.time())
rotate = int(time.time())
fileserver = salt.fileserver.Fileserver(self.opts)
runners = salt.loader.runner(self.opts)
schedule = salt.utils.schedule.Schedule(self.opts, runners)
ckminions = salt.utils.minions.CkMinions(self.opts)
event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
pillargitfs = []
for opts_dict in [x for x in self.opts.get('ext_pillar', [])]:
if 'git' in opts_dict:
br, loc = opts_dict['git'].strip().split()
pillargitfs.append(git_pillar.GitPillar(br, loc, self.opts))
old_present = set()
while True:
now = int(time.time())
loop_interval = int(self.opts['loop_interval'])
if (now - last) >= loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
if self.opts.get('publish_session'):
if now - rotate >= self.opts['publish_session']:
salt.crypt.dropfile(self.opts['cachedir'])
rotate = now
if self.opts.get('search'):
if now - last >= self.opts['search_index_interval']:
search.index()
salt.daemons.masterapi.fileserver_update(fileserver)
# check how close to FD limits you are
salt.utils.verify.check_max_open_files(self.opts)
try:
for pillargit in pillargitfs:
pillargit.update()
except Exception as exc:
log.error('Exception {0} occurred in file server update '
'for git_pillar module.'.format(exc))
try:
schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if schedule.loop_interval < loop_interval:
loop_interval = schedule.loop_interval
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
last = now
if self.opts.get('presence_events', False):
present = ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
event.fire_event(data, tagify('present', 'presence'))
old_present = present
try:
time.sleep(loop_interval)
except KeyboardInterrupt:
break
def __set_max_open_files(self):
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
# Unclear what to do with infinity... OSX reports RLIM_INFINITY as
# hard limit, but raising to anything above the soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: '
'{0}/{1}'.format(
mof_s, mof_h
)
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, {0}, is higher '
'than what the user running salt is allowed to raise to, {1}. '
'Defaulting to {1}.'.format(mof_c, mof_h)
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to {0}'.format(mof_c))
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: '
'{0}/{1}'.format(mof_s, mof_h)
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under OSX reported that our 100000 default value is
# still too high.
log.critical(
'Failed to raise max open files setting to {0}. If this '
'value is too low, the salt-master will most likely fail '
'to run properly.'.format(
mof_c
)
)
def _pre_flight(self):
'''
Run pre flight checks, if anything in this method fails then the master
should not start up
'''
errors = []
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
if errors:
for error in errors:
log.error(error)
log.error('Master failed pre flight checks, exiting\n')
sys.exit(1)
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info(
'salt-master is starting as user {0!r}'.format(getpass.getuser())
)
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
clear_old_jobs_proc = multiprocessing.Process(
target=self._clear_old_jobs)
clear_old_jobs_proc.start()
reqserv = ReqServer(
self.opts,
self.crypticle,
self.key,
self.master_key)
reqserv.start_publisher()
reqserv.start_event_publisher()
reqserv.start_reactor()
reqserv.start_halite()
def sigterm_clean(signum, frame):
'''
Cleaner method for stopping multiprocessing processes when a
SIGTERM is encountered. This is required when running a salt
master under a process minder like daemontools
'''
log.warn(
'Caught signal {0}, stopping the Salt Master'.format(
signum
)
)
clean_proc(clear_old_jobs_proc)
clean_proc(reqserv.publisher)
clean_proc(reqserv.eventpublisher)
if hasattr(reqserv, 'halite'):
clean_proc(reqserv.halite)
if hasattr(reqserv, 'reactor'):
clean_proc(reqserv.reactor)
for proc in reqserv.work_procs:
clean_proc(proc)
raise MasterExit
signal.signal(signal.SIGTERM, sigterm_clean)
try:
reqserv.run()
except KeyboardInterrupt:
# Shut the master down gracefully on SIGINT
log.warn('Stopping the Salt Master')
raise SystemExit('\nExiting on Ctrl-c')
class Halite(multiprocessing.Process):
'''
Manage the Halite server
'''
def __init__(self, hopts):
super(Halite, self).__init__()
self.hopts = hopts
def run(self):
'''
Fire up halite!
'''
halite.start(self.hopts)
class Publisher(multiprocessing.Process):
'''
The publishing interface, a simple zeromq publisher that sends out the
commands.
'''
def __init__(self, opts):
super(Publisher, self).__init__()
self.opts = opts
def run(self):
'''
Bind to the interface specified in the configuration file
'''
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
# for zmq >= 2.1 and < 3.0 there is only one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
old_umask = os.umask(0177)
try:
pull_sock.bind(pull_uri)
finally:
os.umask(old_umask)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
package = pull_sock.recv()
pub_sock.send(package)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if context.closed is False:
context.term()
class ReqServer(object):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, crypticle, key, mkey):
self.opts = opts
self.master_key = mkey
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.workers = self.context.socket(zmq.DEALER)
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
# Prepare the AES key
self.key = key
self.crypticle = crypticle
def __bind(self):
'''
Binds the reply server
'''
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
os.remove(dfn)
except os.error:
pass
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.work_procs = []
for ind in range(int(self.opts['worker_threads'])):
self.work_procs.append(MWorker(self.opts,
self.master_key,
self.key,
self.crypticle))
for ind, proc in enumerate(self.work_procs):
log.info('Starting Salt worker process {0}'.format(ind))
proc.start()
self.workers.bind(self.w_uri)
try:
if HAS_PYTHON_SYSTEMD and systemd.daemon.booted():
systemd.daemon.notify('READY=1')
except SystemError:
# Daemon wasn't started by systemd
pass
while True:
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
def start_publisher(self):
'''
Start the salt publisher interface
'''
# Start the publisher
self.publisher = Publisher(self.opts)
self.publisher.start()
def start_event_publisher(self):
'''
Start the salt publisher interface
'''
# Start the publisher
self.eventpublisher = salt.utils.event.EventPublisher(self.opts)
self.eventpublisher.start()
def start_reactor(self):
'''
Start the reactor, but only if the reactor interface is configured
'''
if self.opts.get('reactor'):
self.reactor = salt.utils.event.Reactor(self.opts)
self.reactor.start()
def start_halite(self):
'''
If halite is configured and installed, fire it up!
'''
if HAS_HALITE and 'halite' in self.opts:
log.info('Halite: Starting up ...')
self.halite = Halite(self.opts['halite'])
self.halite.start()
elif 'halite' in self.opts:
log.info('Halite: Not configured, skipping.')
else:
log.debug('Halite: Unavailable.')
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self):
if self.clients.closed is False:
self.clients.setsockopt(zmq.LINGER, 1)
self.clients.close()
if self.workers.closed is False:
self.workers.setsockopt(zmq.LINGER, 1)
self.workers.close()
if self.context.closed is False:
self.context.term()
# Also stop the workers
for worker in self.work_procs:
if worker.is_alive() is True:
worker.terminate()
def __del__(self):
self.destroy()
class MWorker(multiprocessing.Process):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
crypticle):
multiprocessing.Process.__init__(self)
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.mkey = mkey
self.key = key
self.k_mtime = 0
def __bind(self):
'''
Bind to the local port
'''
context = zmq.Context(1)
socket = context.socket(zmq.REP)
w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket {0}'.format(w_uri))
try:
socket.connect(w_uri)
while True:
try:
package = socket.recv()
self._update_aes()
payload = self.serial.loads(package)
ret = self.serial.dumps(self._handle_payload(payload))
socket.send(ret)
# Properly handle EINTR from SIGUSR1
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
# Changes here create a zeromq condition, check with thatch45 before
# making any zeromq changes
except KeyboardInterrupt:
socket.close()
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
'''
try:
key = payload['enc']
load = payload['load']
except KeyError:
return ''
return {'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[key](load)
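# Illustrative payload shapes for the dispatch above (editor's sketch, not
# exhaustive): 'enc' selects the handler and 'load' carries the body, e.g.
#   {'enc': 'clear', 'load': {'cmd': '_auth', 'id': 'minion1', 'pub': '...'}}
#   {'enc': 'aes',   'load': '<crypticle-encrypted, serialized dict>'}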
def _handle_clear(self, load):
'''
Take care of a cleartext command
'''
log.info('Clear payload received with command {cmd}'.format(**load))
if load['cmd'].startswith('__'):
return False
return getattr(self.clear_funcs, load['cmd'])(load)
def _handle_pub(self, load):
'''
Handle a command sent via a public key pair
'''
if load['cmd'].startswith('__'):
return False
log.info('Pubkey payload received with command {cmd}'.format(**load))
def _handle_aes(self, load):
'''
Handle a command sent via an AES key
'''
try:
data = self.crypticle.loads(load)
except Exception:
return ''
if 'cmd' not in data:
log.error('Received malformed command {0}'.format(data))
return {}
log.info('AES payload received with command {0}'.format(data['cmd']))
if data['cmd'].startswith('__'):
return False
return self.aes_funcs.run_func(data['cmd'], data)
def _update_aes(self):
'''
Check to see if a fresh AES key is available and update the components
of the worker
'''
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
except os.error:
return
if stats.st_mode != 0100400:
# Invalid dfn, return
return
if stats.st_mtime > self.k_mtime:
# new key, refresh crypticle
with salt.utils.fopen(dfn) as fp_:
aes = fp_.read()
if len(aes) != 76:
return
self.crypticle = salt.crypt.Crypticle(self.opts, aes)
self.clear_funcs.crypticle = self.crypticle
self.clear_funcs.opts['aes'] = aes
self.aes_funcs.crypticle = self.crypticle
self.aes_funcs.opts['aes'] = aes
self.k_mtime = stats.st_mtime
def run(self):
'''
Start a Master Worker
'''
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
self.mkey,
self.crypticle)
self.aes_funcs = AESFuncs(self.opts, self.crypticle)
self.__bind()
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts, crypticle):
self.opts = opts
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.ckminions = salt.utils.minions.CkMinions(opts)
# Create the tops dict for loading external top data
self.tops = salt.loader.tops(self.opts)
# Make a client
self.local = salt.client.LocalClient(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
self.__setup_fileserver()
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = fs_.serve_file
self._file_hash = fs_.file_hash
self._file_list = fs_.file_list
self._file_list_emptydirs = fs_.file_list_emptydirs
self._dir_list = fs_.dir_list
self._symlink_list = fs_.symlink_list
self._file_envs = fs_.envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
with salt.utils.fopen(pub_path, 'r') as fp_:
minion_pub = fp_.read()
tmp_pub = salt.utils.mkstemp()
with salt.utils.fopen(tmp_pub, 'w+') as fp_:
fp_.write(minion_pub)
pub = None
try:
pub = RSA.load_pub_key(tmp_pub)
except RSA.RSAError as err:
log.error('Unable to load temporary public key "{0}": {1}'
.format(tmp_pub, err))
try:
os.remove(tmp_pub)
if pub.public_decrypt(token, 5) == 'salt':
return True
except RSA.RSAError as err:
log.error('Unable to decrypt token: {0}'.format(err))
log.error('Salt minion claiming to be {0} has attempted to '
'communicate with the master and could not be verified'
.format(id_))
return False
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if re.match('publish.*', clear_load['fun']):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
(
'Minion id {0} is not who it says it is and is attempting '
'to issue a peer command'
).format(clear_load['id'])
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
good = self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
return False
return True
def _ext_nodes(self, load):
'''
Return the results from an external node classifier if one is
specified
'''
if 'id' not in load:
log.error('Received call for external nodes without an id')
return {}
if not salt.utils.verify.valid_id(self.opts, load['id']):
return {}
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
ret = {}
# The old ext_nodes method is set to be deprecated in 0.10.4
# and should be removed within 3-5 releases in favor of the
# "master_tops" system
if self.opts['external_nodes']:
if not salt.utils.which(self.opts['external_nodes']):
log.error(('Specified external nodes controller {0} is not'
' available, please verify that it is installed'
'').format(self.opts['external_nodes']))
return {}
cmd = '{0} {1}'.format(self.opts['external_nodes'], load['id'])
ndata = yaml.safe_load(
subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE
).communicate()[0])
if 'environment' in ndata:
saltenv = ndata['environment']
else:
saltenv = 'base'
if 'classes' in ndata:
if isinstance(ndata['classes'], dict):
ret[saltenv] = list(ndata['classes'])
elif isinstance(ndata['classes'], list):
ret[saltenv] = ndata['classes']
else:
return ret
# Evaluate all configured master_tops interfaces
opts = {}
grains = {}
if 'opts' in load:
opts = load['opts']
if 'grains' in load['opts']:
grains = load['opts']['grains']
for fun in self.tops:
if fun not in self.opts.get('master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
'Top function {0} failed with error {1} for minion '
'{2}'.format(
fun, exc, load['id']
)
)
return ret
def _master_opts(self, load):
'''
Return the master options to the minion
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
'''
if any(key not in load for key in ('id', 'tgt', 'fun')):
return {}
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
if 'mine_get' in self.opts:
# If master side acl defined.
if not isinstance(self.opts['mine_get'], dict):
return {}
perms = set()
for match in self.opts['mine_get']:
if re.match(match, load['id']):
if isinstance(self.opts['mine_get'][match], list):
perms.update(self.opts['mine_get'][match])
if not any(re.match(perm, load['fun']) for perm in perms):
return {}
ret = {}
if not salt.utils.verify.valid_id(self.opts, load['id']):
return ret
checker = salt.utils.minions.CkMinions(self.opts)
minions = checker.check_minions(
load['tgt'],
load.get('expr_form', 'glob')
)
for minion in minions:
mine = os.path.join(
self.opts['cachedir'],
'minions',
minion,
'mine.p')
try:
with salt.utils.fopen(mine, 'rb') as fp_:
fdata = self.serial.load(fp_).get(load['fun'])
if fdata:
ret[minion] = fdata
except Exception:
continue
return ret
def _mine(self, load):
'''
Return the mine data
'''
if 'id' not in load or 'data' not in load:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'mine.p')
if not load.get('clear', False):
if os.path.isfile(datap):
with salt.utils.fopen(datap, 'rb') as fp_:
new = self.serial.load(fp_)
if isinstance(new, dict):
new.update(load['data'])
load['data'] = new
with salt.utils.fopen(datap, 'w+b') as fp_:
fp_.write(self.serial.dumps(load['data']))
return True
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
'''
if 'id' not in load or 'fun' not in load:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
return True
datap = os.path.join(cdir, 'mine.p')
if os.path.isfile(datap):
try:
with salt.utils.fopen(datap, 'rb') as fp_:
mine_data = self.serial.load(fp_)
if isinstance(mine_data, dict):
if mine_data.pop(load['fun'], False):
with salt.utils.fopen(datap, 'w+b') as fp_:
fp_.write(self.serial.dumps(mine_data))
except OSError:
return False
return True
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
'''
if 'id' not in load:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
return True
datap = os.path.join(cdir, 'mine.p')
if os.path.isfile(datap):
try:
os.remove(datap)
except OSError:
return False
return True
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not self.opts['file_recv'] or os.path.isabs(load['path']):
return False
if os.path.isabs(load['path']) or '../' in load['path']:
# Can overwrite master files!!
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
file_recv_max_size = 1024*1024 * self.opts.get('file_recv_max_size', 100)
if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
log.error(
'Exceeding file_recv_max_size limit: {0}'.format(
file_recv_max_size
)
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
load['path'])
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(load['data'])
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
pillar = salt.pillar.Pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
load.get('ext'),
self.mminion.functions)
data = pillar.compile_pillar()
if self.opts.get('minion_data_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'data.p')
with salt.utils.fopen(datap, 'w+b') as fp_:
fp_.write(
self.serial.dumps(
{'grains': load['grains'],
'pillar': data})
)
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
'''
if 'id' not in load:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
if 'events' not in load and ('tag' not in load or 'data' not in load):
return False
if 'events' in load:
for event in load['events']:
self.event.fire_event(event, event['tag']) # old dup event
if load.get('pretag') is not None:
self.event.fire_event(event, tagify(event['tag'], base=load['pretag']))
else:
tag = load['tag']
self.event.fire_event(load, tag)
return True
def _return(self, load):
'''
Handle the return data sent from the minions
'''
# If the return data is invalid, just ignore it
if any(key not in load for key in ('return', 'jid', 'id')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
new_loadp = False
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
load['arg'] = load.get('arg', load.get('fun_args', []))
load['tgt_type'] = 'glob'
load['tgt'] = load['id']
load['jid'] = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type'],
load.get('nocache', False))
new_loadp = load.get('nocache', True) and True
log.info('Got return from {id} for job {jid}'.format(**load))
self.event.fire_event(load, load['jid']) # old dup event
self.event.fire_event(
load, tagify([load['jid'], 'ret', load['id']], 'job'))
self.event.fire_ret_load(load)
if self.opts['master_ext_job_cache']:
fstr = '{0}.returner'.format(self.opts['master_ext_job_cache'])
self.mminion.returners[fstr](load)
return
if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
return
jid_dir = salt.utils.jid_dir(
load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
if os.path.exists(os.path.join(jid_dir, 'nocache')):
return
if new_loadp:
with salt.utils.fopen(
os.path.join(jid_dir, '.load.p'), 'w+b'
) as fp_:
self.serial.dump(load, fp_)
hn_dir = os.path.join(jid_dir, load['id'])
try:
os.mkdir(hn_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Minion has already returned this jid and it should be dropped
log.error(
'An extra return was detected from minion {0}, please verify '
'the minion, this could be a replay attack'.format(
load['id']
)
)
return False
elif e.errno == errno.ENOENT:
log.error(
'An inconsistency occurred, a job was received with a job id '
'that is not present on the master: {jid}'.format(**load)
)
return False
raise
self.serial.dump(
load['return'],
# Use atomic open here to avoid the file being read before it's
# completely written to. Refs #1935
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, 'return.p'), 'w+b'
)
)
if 'out' in load:
self.serial.dump(
load['out'],
# Use atomic open here to avoid the file being read before
# it's completely written to. Refs #1935
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, 'out.p'), 'w+b'
)
)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
'''
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
return None
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
# set the write flag
jid_dir = salt.utils.jid_dir(
load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
if not os.path.isdir(jid_dir):
os.makedirs(jid_dir)
if 'load' in load:
with salt.utils.fopen(os.path.join(jid_dir, '.load.p'), 'w+b') as fp_:
self.serial.dump(load['load'], fp_)
wtag = os.path.join(jid_dir, 'wtag_{0}'.format(load['id']))
try:
with salt.utils.fopen(wtag, 'w+b') as fp_:
fp_.write('')
except (IOError, OSError):
log.error(
'Failed to commit the write tag for the syndic return, are '
'permissions correct in the cache dir: {0}?'.format(
self.opts['cachedir']
)
)
return False
# Format individual return loads
for key, item in load['return'].items():
ret = {'jid': load['jid'],
'id': key,
'return': item}
if 'out' in load:
ret['out'] = load['out']
self._return(ret)
if os.path.isfile(wtag):
os.remove(wtag)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
'''
if 'peer_run' not in self.opts:
return {}
if not isinstance(self.opts['peer_run'], dict):
return {}
if any(key not in clear_load for key in ('fun', 'arg', 'id', 'tok')):
return {}
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
clear_load['id']
)
)
return {}
clear_load.pop('tok')
perms = set()
for match in self.opts['peer_run']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer_run'][match], list):
perms.update(self.opts['peer_run'][match])
good = False
for perm in perms:
if re.match(perm, clear_load['fun']):
good = True
if not good:
return {}
# Prepare the runner object
opts = {'fun': clear_load['fun'],
'arg': clear_load['arg'],
'id': clear_load['id'],
'doc': False,
'conf_file': self.opts['conf_file']}
opts.update(self.opts)
runner = salt.runner.Runner(opts)
return runner.run()
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initiated the execution.
'''
if any(key not in load for key in ('jid', 'id', 'tok')):
return {}
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, load['jid'])
with salt.utils.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(clear_load):
return {}
# Set up the publication payload
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'expr_form': clear_load.get('tgt_type', 'glob'),
'tgt': clear_load['tgt'],
'ret': clear_load['ret'],
'id': clear_load['id'],
}
if 'tgt_type' in clear_load:
if clear_load['tgt_type'].startswith('node'):
if clear_load['tgt'] in self.opts['nodegroups']:
load['tgt'] = self.opts['nodegroups'][clear_load['tgt']]
load['expr_form_type'] = 'compound'
load['expr_form'] = clear_load['tgt_type']
else:
return {}
else:
load['expr_form'] = clear_load['tgt_type']
ret = {}
ret['jid'] = self.local.cmd_async(**load)
ret['minions'] = self.ckminions.check_minions(
clear_load['tgt'],
load['expr_form'])
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, ret['jid'])
with salt.utils.fopen(jid_fn, 'w+') as fp_:
fp_.write(clear_load['id'])
return ret
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(clear_load):
return {}
# Set up the publication payload
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'expr_form': clear_load.get('tgt_type', 'glob'),
'tgt': clear_load['tgt'],
'ret': clear_load['ret'],
'id': clear_load['id'],
}
if 'tmo' in clear_load:
try:
load['timeout'] = int(clear_load['tmo'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(
clear_load['tmo'])
log.warn(msg)
return {}
if 'timeout' in clear_load:
try:
load['timeout'] = int(clear_load['timeout'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(
clear_load['timeout'])
log.warn(msg)
return {}
if 'tgt_type' in clear_load:
if clear_load['tgt_type'].startswith('node'):
if clear_load['tgt'] in self.opts['nodegroups']:
load['tgt'] = self.opts['nodegroups'][clear_load['tgt']]
load['expr_form_type'] = 'compound'
else:
return {}
else:
load['expr_form'] = clear_load['tgt_type']
load['raw'] = True
ret = {}
for minion in self.local.cmd_iter(**load):
if clear_load.get('form', '') == 'full':
data = minion
if 'jid' in minion:
ret['__jid__'] = minion['jid']
data['ret'] = data.pop('return')
ret[minion['id']] = data
else:
ret[minion['id']] = minion['return']
if 'jid' in minion:
ret['__jid__'] = minion['jid']
for key, val in self.local.get_cache_returns(ret['__jid__']).items():
if key not in ret:
ret[key] = val
if clear_load.get('form', '') != 'full':
ret.pop('__jid__')
return ret
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
'''
if 'id' not in load or 'tok' not in load:
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
(
'Minion id {0} is not who it says it is and is attempting '
'to revoke the key for {0}'
).format(load['id'])
)
return False
keyapi = salt.key.Key(self.opts)
keyapi.delete_key(load['id'])
return True
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
'''
# Don't honor private functions
if func.startswith('__'):
return self.crypticle.dumps({})
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call {0} took {1} seconds'.format(
func, time.time() - start
)
)
except Exception:
ret = ''
log.error(
'Error in function {0}:\n'.format(func),
exc_info=True
)
else:
log.error(
'Received function {0} which is unavailable on the master, '
'returning False'.format(
func
)
)
return self.crypticle.dumps(False)
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return self.crypticle.dumps(ret)
# encrypt with a specific AES key
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
key = salt.crypt.Crypticle.generate_key_string()
pcrypt = salt.crypt.Crypticle(
self.opts,
key)
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError:
return self.crypticle.dumps({})
pret = {}
pret['key'] = pub.public_encrypt(key, 4)
pret['pillar'] = pcrypt.dumps(
ret if ret is not False else {}
)
return pret
# AES Encrypt the return
return self.crypticle.dumps(ret)
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key, master_key, crypticle):
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.key = key
self.master_key = master_key
self.crypticle = crypticle
# Create the event manager
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
# Make a client
self.local = salt.client.LocalClient(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
def __check_permissions(self, filename):
'''
Check if the specified filename has correct permissions
'''
if salt.utils.is_windows():
return True
# After we've ascertained we're not on windows
import grp
try:
user = self.opts['user']
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except KeyError:
log.error(
'Failed to determine groups for user {0}. The user is not '
'available.\n'.format(
user
)
)
return False
fmode = os.stat(filename)
if os.getuid() == 0:
if fmode.st_uid == uid or fmode.st_gid != gid:
return True
elif self.opts.get('permissive_pki_access', False) \
and fmode.st_gid in groups:
return True
else:
if stat.S_IWOTH & fmode.st_mode:
# don't allow others to write to the file
return False
# check group flags
if self.opts.get('permissive_pki_access', False) \
and stat.S_IWGRP & fmode.st_mode:
return True
elif stat.S_IWGRP & fmode.st_mode:
return False
# check if writable by group or other
if not (stat.S_IWGRP & fmode.st_mode or
stat.S_IWOTH & fmode.st_mode):
return True
return False
def __check_signing_file(self, keyid, signing_file):
'''
Check a keyid for membership in a signing file
'''
if not signing_file or not os.path.exists(signing_file):
return False
if not self.__check_permissions(signing_file):
message = 'Wrong permissions for {0}, ignoring content'
log.warn(message.format(signing_file))
return False
with salt.utils.fopen(signing_file, 'r') as fp_:
for line in fp_:
line = line.strip()
if line.startswith('#'):
continue
if line == keyid:
return True
if fnmatch.fnmatch(keyid, line):
return True
try:
if re.match(r'\A{0}\Z'.format(line), keyid):
return True
except re.error:
log.warn(
'{0} is not a valid regular expression, ignoring line '
'in {1}'.format(line, signing_file)
)
continue
return False
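# Example signing-file contents for the check above (editor's illustration):
# one entry per line, tried as an exact id, then a glob (fnmatch), then a
# full-match regular expression; lines starting with '#' are skipped.
#   web01.example.com
#   db*.example.com
#   syndic-[0-9]+\.example\.com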
def __check_autoreject(self, keyid):
'''
Checks if the specified keyid should automatically be rejected.
'''
return self.__check_signing_file(
keyid,
self.opts.get('autoreject_file', None)
)
def __check_autosign(self, keyid):
'''
Checks if the specified keyid should automatically be signed.
'''
if self.opts['auto_accept']:
return True
return self.__check_signing_file(
keyid,
self.opts.get('autosign_file', None)
)
def _auth(self, load):
'''
Authenticate the client, use the sent public key to encrypt the AES key
which was generated at start up.
This method fires an event over the master event manager. The event is
tagged "auth" and returns a dict with information about the auth
event
# Verify that the key we are receiving matches the stored key
# Store the key if it is not there
# Make an RSA key with the pub key
# Encrypt the AES key as an encrypted salt.payload
# Package the return and return it
'''
if not salt.utils.verify.valid_id(self.opts, load['id']):
log.info(
'Authentication request from invalid id {id}'.format(**load)
)
return {'enc': 'clear',
'load': {'ret': False}}
log.info('Authentication request from {id}'.format(**load))
# Check if key is configured to be auto-rejected/signed
auto_reject = self.__check_autoreject(load['id'])
auto_sign = self.__check_autosign(load['id'])
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
pubfn_pend = os.path.join(self.opts['pki_dir'],
'minions_pre',
load['id'])
pubfn_rejected = os.path.join(self.opts['pki_dir'],
'minions_rejected',
load['id'])
pubfn_denied = os.path.join(self.opts['pki_dir'],
'minions_denied',
load['id'])
if self.opts['open_mode']:
# open mode is turned on, nuts to checks and overwrite whatever
# is there
pass
elif os.path.isfile(pubfn_rejected):
# The key has been rejected, don't place it in pending
log.info('Public key rejected for {id}'.format(**load))
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
elif os.path.isfile(pubfn):
# The key has been accepted, check it
if salt.utils.fopen(pubfn, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys did not match. This may be an attempt to compromise '
'the Salt cluster.'.format(**load)
)
# put denied minion key into minions_denied
with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
elif not os.path.isfile(pubfn_pend):
# The key has not been accepted, this is a new minion
if os.path.isdir(pubfn_pend):
# The key path is a directory, error out
log.info(
'New public key {id} is a directory'.format(**load)
)
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
if auto_reject:
key_path = pubfn_rejected
log.info('New public key for {id} rejected via autoreject_file'
.format(**load))
key_act = 'reject'
key_result = False
elif not auto_sign:
key_path = pubfn_pend
log.info('New public key for {id} placed in pending'
.format(**load))
key_act = 'pend'
key_result = True
else:
# The key is being automatically accepted, don't do anything
# here and let the auto accept logic below handle it.
key_path = None
if key_path is not None:
# Write the key to the appropriate location
with salt.utils.fopen(key_path, 'w+') as fp_:
fp_.write(load['pub'])
ret = {'enc': 'clear',
'load': {'ret': key_result}}
eload = {'result': key_result,
'act': key_act,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
elif os.path.isfile(pubfn_pend):
# This key is in the pending dir and is awaiting acceptance
if auto_reject:
# We don't care if the keys match, this minion is being
# auto-rejected. Move the key file from the pending dir to the
# rejected dir.
try:
shutil.move(pubfn_pend, pubfn_rejected)
except (IOError, OSError):
pass
log.info('Pending public key for {id} rejected via '
'autoreject_file'.format(**load))
ret = {'enc': 'clear',
'load': {'ret': False}}
eload = {'result': False,
'act': 'reject',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
elif not auto_sign:
# This key is in the pending dir and is not being auto-signed.
# Check if the keys are the same and error out if this is the
# case. Otherwise log the fact that the minion is still
# pending.
if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'key in pending did not match. This may be an '
'attempt to compromise the Salt cluster.'
.format(**load)
)
# put denied minion key into minions_denied
with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
else:
log.info(
'Authentication failed from host {id}, the key is in '
'pending and needs to be accepted with salt-key '
'-a {id}'.format(**load)
)
eload = {'result': True,
'act': 'pend',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': True}}
else:
# This key is in pending and has been configured to be
# auto-signed. Check to see if it is the same key, and if
# so, pass on doing anything here, and let it get automatically
# accepted below.
if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys in pending did not match. This may be an '
'attempt to compromise the Salt cluster.'
.format(**load)
)
# put denied minion key into minions_denied
with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
else:
pass
else:
# Something happened that I have not accounted for, FAIL!
log.warn('Unaccounted for authentication failure')
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
log.info('Authentication accepted from {id}'.format(**load))
# only write to disk if you are adding the file, and in open mode,
# which implies we accept any key from a minion (key needs to be
# written every time because what's on disk is used for encrypting)
if not os.path.isfile(pubfn) or self.opts['open_mode']:
with salt.utils.fopen(pubfn, 'w+') as fp_:
fp_.write(load['pub'])
pub = None
# The key payload may sometimes be corrupt when using auto-accept
# and an empty request comes in
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError as err:
log.error('Corrupt public key "{0}": {1}'.format(pubfn, err))
return {'enc': 'clear',
'load': {'ret': False}}
ret = {'enc': 'pub',
'pub_key': self.master_key.get_pub_str(),
'publish_port': self.opts['publish_port'],
}
if self.opts['auth_mode'] >= 2:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(load['token'], 4)
aes = '{0}_|-{1}'.format(self.opts['aes'], mtoken)
                except Exception:
                    # Token failed to decrypt, send back the salty bacon to
                    # support older minions by falling back to the plain AES key
                    aes = self.opts['aes']
else:
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(aes, 4)
else:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(
load['token'], 4
)
ret['token'] = pub.public_encrypt(mtoken, 4)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(self.opts['aes'], 4)
# Be aggressive about the signature
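        # The sha256 digest of the AES key is signed with the master's private
        # RSA key so the minion can verify this reply came from this master.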
digest = hashlib.sha256(aes).hexdigest()
ret['sig'] = self.master_key.key.private_encrypt(digest, 5)
eload = {'result': True,
'act': 'accept',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
if 'token' in clear_load:
try:
token = self.loadauth.get_tok(clear_load['token'])
except Exception as exc:
msg = 'Exception occurred when generating auth token: {0}'.format(
exc)
log.error(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if not token:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['eauth'] not in self.opts['external_auth']:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['name'] not in self.opts['external_auth'][token['eauth']]:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
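            # Use the ACL defined for the token owner if one exists, otherwise
            # fall back to the wildcard ('*') ACL for this eauth backend.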
            good = self.ckminions.runner_check(
                self.opts['external_auth'][token['eauth']][token['name']]
                if token['name'] in self.opts['external_auth'][token['eauth']]
                else self.opts['external_auth'][token['eauth']]['*'],
                clear_load['fun'])
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(
fun,
clear_load.get('kwarg', {}),
token['name'])
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=exc.message))
if 'eauth' not in clear_load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
name = self.loadauth.load_name(clear_load)
            if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
                    ('*' in self.opts['external_auth'][clear_load['eauth']])):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if not self.loadauth.time_auth(clear_load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
            good = self.ckminions.runner_check(
                self.opts['external_auth'][clear_load['eauth']][name]
                if name in self.opts['external_auth'][clear_load['eauth']]
                else self.opts['external_auth'][clear_load['eauth']]['*'],
                clear_load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(fun,
clear_load.get('kwarg', {}),
clear_load.get('username', 'UNKNOWN'))
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=exc.message))
except Exception as exc:
log.error(
'Exception occurred in the runner system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=exc.message))
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
if 'token' in clear_load:
try:
token = self.loadauth.get_tok(clear_load['token'])
except Exception as exc:
msg = 'Exception occurred when generating auth token: {0}'.format(
exc)
log.error(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if not token:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['eauth'] not in self.opts['external_auth']:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['name'] not in self.opts['external_auth'][token['eauth']]:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
good = self.ckminions.wheel_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
clear_load['fun'])
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
jid = salt.utils.gen_jid()
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': token['name']}
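            # Fire a 'new' wheel event, run the wheel function synchronously,
            # then fire a 'ret' event carrying either the result or the error.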
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(exc)
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
if 'eauth' not in clear_load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
name = self.loadauth.load_name(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
('*' in self.opts['external_auth'][clear_load['eauth']])):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if not self.loadauth.time_auth(clear_load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
good = self.ckminions.wheel_check(
self.opts['external_auth'][clear_load['eauth']][name]
if name in self.opts['external_auth'][clear_load['eauth']]
                else self.opts['external_auth'][clear_load['eauth']]['*'],
clear_load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
jid = salt.utils.gen_jid()
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': clear_load.get('username', 'UNKNOWN')}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(
'Exception occurred in the wheel system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=exc.message))
def mk_token(self, clear_load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
if 'eauth' not in clear_load:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning('Authentication failure of type "eauth" occurred.')
return ''
try:
name = self.loadauth.load_name(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
('*' in self.opts['external_auth'][clear_load['eauth']])):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if not self.loadauth.time_auth(clear_load):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return self.loadauth.mk_token(clear_load)
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish(self, clear_load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
# check blacklist/whitelist
good = True
# Check if the user is blacklisted
for user_re in self.opts['client_acl_blacklist'].get('users', []):
if re.match(user_re, clear_load['user']):
good = False
break
# check if the cmd is blacklisted
for module_re in self.opts['client_acl_blacklist'].get('modules', []):
            # if this is a regular command, it's a single function
            if isinstance(clear_load['fun'], str):
funs_to_check = [clear_load['fun']]
            # if this is a compound function
else:
funs_to_check = clear_load['fun']
for fun in funs_to_check:
if re.match(module_re, fun):
good = False
break
if good is False:
log.error(
'{user} does not have permissions to run {function}. Please '
'contact your local administrator if you believe this is in '
'error.\n'.format(
user=clear_load['user'],
function=clear_load['fun']
)
)
return ''
# to make sure we don't step on anyone else's toes
del good
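        # Authorization is attempted in this order: an eauth token passed in
        # kwargs, explicit eauth credentials, a named user checked against the
        # master keys and client_acl, and finally a bare key check for the
        # current system user.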
# Check for external auth calls
if extra.get('token', False):
# A token was passed, check it
try:
token = self.loadauth.get_tok(extra['token'])
except Exception as exc:
log.error(
'Exception occurred when generating auth token: {0}'.format(
exc
)
)
return ''
if not token:
log.warning('Authentication failure of type "token" occurred.')
return ''
if token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred.')
return ''
if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
('*' in self.opts['external_auth'][token['eauth']])):
log.warning('Authentication failure of type "token" occurred.')
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "token" occurred.'
)
return ''
clear_load['user'] = token['name']
log.debug('Minion tokenized user = "{0}"'.format(clear_load['user']))
elif 'eauth' in extra:
if extra['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
try:
name = self.loadauth.load_name(extra)
if not ((name in self.opts['external_auth'][extra['eauth']]) |
('*' in self.opts['external_auth'][extra['eauth']])):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
if not self.loadauth.time_auth(extra):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][extra['eauth']][name]
if name in self.opts['external_auth'][extra['eauth']]
else self.opts['external_auth'][extra['eauth']]['*'],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
clear_load['user'] = name
# Verify that the caller has root on master
elif 'user' in clear_load:
if clear_load['user'].startswith('sudo_'):
# If someone can sudo, allow them to act as root
if clear_load.get('key', 'invalid') == self.key.get('root'):
clear_load.pop('key')
elif clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == self.opts.get('user', 'root'):
if clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == 'root':
if clear_load.pop('key') != self.key.get(self.opts.get('user', 'root')):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == getpass.getuser():
if clear_load.pop('key') != self.key.get(clear_load['user']):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if clear_load['user'] in self.key:
# User is authorised, check key and check perms
if clear_load.pop('key') != self.key[clear_load['user']]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
if clear_load['user'] not in self.opts['client_acl']:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
good = self.ckminions.auth_check(
self.opts['client_acl'][clear_load['user']],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "user" '
'occurred.'
)
return ''
else:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if clear_load.pop('key') != self.key[getpass.getuser()]:
log.warning(
'Authentication failure of type "other" occurred.'
)
return ''
# Retrieve the minions list
minions = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob')
)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions
}
}
# Retrieve the jid
if not clear_load['jid']:
clear_load['jid'] = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type'],
extra.get('nocache', False)
)
self.event.fire_event({'minions': minions}, clear_load['jid'])
jid_dir = salt.utils.jid_dir(
clear_load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, 'new_job') # old dup event
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
# Verify the jid dir
if not os.path.isdir(jid_dir):
os.makedirs(jid_dir)
# Save the invocation information
self.serial.dump(
clear_load,
salt.utils.fopen(os.path.join(jid_dir, '.load.p'), 'w+b')
)
# save the minions to a cache so we can see in the UI
self.serial.dump(
minions,
salt.utils.fopen(os.path.join(jid_dir, '.minions.p'), 'w+b')
)
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
log.critical(
'The specified returner used for the external job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['ext_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'user' in clear_load:
log.info(
'User {user} Published command {fun} with jid {jid}'.format(
**clear_load
)
)
load['user'] = clear_load['user']
else:
log.info(
'Published command {fun} with jid {jid}'.format(
**clear_load
)
)
log.debug('Published command details {0}'.format(load))
payload['load'] = self.crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
pub_sock.send(self.serial.dumps(payload))
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions
}
}
|
stream_infer.py
|
#! /usr/bin/env python3
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
# Python script to start a USB camera and feed frames to
# the Movidius Neural Compute Stick that is loaded with a
# CNN graph file and report the inferred results
import sys
sys.path.insert(0, "../../ncapi2_shim")
import mvnc_simple_api as fx
#import mvnc.mvncapi as fx
import gi
gi.require_version('Gst', '1.0')
gi.require_version('Gdk', '3.0')
gi.require_version('Gtk', '3.0')
gi.require_version('GLib','2.0')
gi.require_version('GstVideo', '1.0')
from gi.repository import Gdk
from gi.repository import Gst
from gi.repository import Gtk
from gi.repository import GstVideo
from gi.repository import GLib
from gi.repository import GdkX11
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
from queue import Queue
from threading import Thread
import numpy
'''
NETWORK_IMAGE_WIDTH = 227 # the width of images the network requires
NETWORK_IMAGE_HEIGHT = 227 # the height of images the network requires
NETWORK_IMAGE_FORMAT = "BGR" # the format of the images the network requires
NETWORK_DIRECTORY = "../../caffe/GenderNet/" # directory of the network this directory needs to
# have 3 files: "graph", "stat.txt" and "categories.txt"
NETWORK_STAT_TXT = "./gendernet_stat.txt" # stat.txt for networ
NETWORK_CATEGORIES_TXT = "./gendernet_categories.txt" # categories.txt for network
NETWORK_IMAGE_WIDTH = 224 # the width of images the network requires
NETWORK_IMAGE_HEIGHT = 224 # the height of images the network requires
NETWORK_IMAGE_FORMAT = "BGR" # the format of the images the network requires
NETWORK_DIRECTORY = "../../caffe/GoogLeNet/" # directory of the network this directory needs to
# have 3 files: "graph", "stat.txt" and "categories.txt"
NETWORK_STAT_TXT = "./googlenet_stat.txt" # stat.txt for networ
NETWORK_CATEGORIES_TXT = "./googlenet_categories.txt" # categories.txt for network
'''
NETWORK_IMAGE_WIDTH = 227 # the width of images the network requires
NETWORK_IMAGE_HEIGHT = 227 # the height of images the network requires
NETWORK_IMAGE_FORMAT = "BGR" # the format of the images the network requires
NETWORK_DIRECTORY = "../../caffe/SqueezeNet/" # directory of the network
NETWORK_STAT_TXT = "./squeezenet_stat.txt" # stat.txt for networ
NETWORK_CATEGORIES_TXT = "./squeezenet_categories.txt" # categories.txt for network
'''
NETWORK_IMAGE_WIDTH = 227 # the width of images the network requires
NETWORK_IMAGE_HEIGHT = 227 # the height of images the network requires
NETWORK_IMAGE_FORMAT = "BGR" # the format of the images the network requires
NETWORK_DIRECTORY = "../../caffe/AlexNet/" # directory of the network this directory needs to
                                           # have 3 files: "graph", "stat.txt" and "categories.txt"
NETWORK_STAT_TXT = "./alexnet_stat.txt" # stat.txt for network
NETWORK_CATEGORIES_TXT = "./alexnet_categories.txt" # categories.txt for network
'''
# The capture dimensions of the image need to be a multiple of 4 (the image will be cropped back down for inferences)
NETWORK_IMAGE_WIDTH_4 = NETWORK_IMAGE_WIDTH + ((4 - (NETWORK_IMAGE_WIDTH % 4)) % 4)
NETWORK_IMAGE_HEIGHT_4 = NETWORK_IMAGE_HEIGHT + ((4 - (NETWORK_IMAGE_HEIGHT % 4)) % 4)
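# e.g. with NETWORK_IMAGE_WIDTH = 227, 227 % 4 == 3, so one extra column is added
# and NETWORK_IMAGE_WIDTH_4 becomes 228 (and likewise 228 for the height).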
GST_APP_NAME = "app" # gstreamer sink name
GST_VIEW_NAME = "view" # gstreamer view sink name
CAMERA_INDEX = "0" # 0 is first usb cam, 1 the second etc.
SINK_NAME="xvimagesink" # use for x86-64 platforms
#SINK_NAME="glimagesink" # use for Raspian Jessie platforms
# Globals for the program
gGstAppSink = None
gIt = None
gRunning = False
gOt = None
gNetworkMean = None
gNetworkStd = None
gNetworkCategories = None
gUpdateq = Queue()
gGraph = None
gCallback = None
gResultLabel = Gtk.Label() # label to display inferences in
gDrawAreaSink = Gtk.DrawingArea() # DrawingArea to display camera feed in.
# end of globals for the program
# connected to the GUI window and is called when the window is closed
def window_closed (widget, event, pipeline):
"""
:param widget: the GTK window
:param event:
:param pipeline: the Gst Pipeline
:return: nothing
"""
widget.hide()
pipeline.set_state(Gst.State.NULL)
Gtk.main_quit ()
# Start the input and output worker threads for the application
def start_thread():
""" start threads and idle handler (update_ui) for callback dispatching
"""
global gIt, gOt, gRunning
gRunning = True
GLib.idle_add(update_ui) # TODO: inefficient, find a thread safe signal/event posting method
gIt = Thread(target = input_thread)
gIt.start()
gOt = Thread(target = output_thread)
gOt.start()
#Stop worker threads for the application. Blocks until threads are terminated
def stop_thread():
""" stop threads
"""
global gIt, gOt, gRunning
# Set gRunning flag to false so worker threads know to terminate
    gRunning = False
# Wait for worker threads to terminate.
gIt.join()
gOt.join()
# Called when no higher priority events are pending in the main loop.
# Will call the callback function with the data from the update queue.
def update_ui():
"""
Dispatch callbacks with post processed inference results
in the main thread context
:return: running global status
"""
global gRunning
while not gUpdateq.empty():
#get item from update queue
(out, cookie) = gUpdateq.get()
gCallback(cookie, out)
return gRunning
# Worker thread function for input to MVNC.
# Gets a preprocessed camera sample and calls the MVNC API to do an inference on the image.
def input_thread():
""" input thread function
"""
global gRunning
frame_number = 0
while gRunning:
preprocessed_image_buf = get_sample()
if preprocessed_image_buf is not None: # TODO: eliminate busy looping before samples are available
#print("loading %s : %s" % (preprocessed_image_buf.shape, preprocessed_image_buf ))
            gGraph.LoadTensor(preprocessed_image_buf, "frame %s" % frame_number)
            frame_number += 1
print("Input thread terminating.")
# Worker thread function to handle inference results from the MVNC stick
def output_thread():
""" output thread function
for getting inference results from Movidius NCS
running graph specific post processing of inference result
queuing the results for main thread callbacks
"""
global gRunning
try:
while gRunning:
try:
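                # user_data is the cookie string ("frame N") that input_thread
                # passed to LoadTensor along with the image tensor.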
inference_result, user_data = gGraph.GetResult()
gUpdateq.put((postprocess(inference_result), user_data))
except KeyError:
# This error occurs when GetResult can't access the user param from the graph, we're just ignoring it for now
#print("KeyError")
pass
except Exception as e:
print(e)
pass
print("Output thread terminating")
# Get a sample from the camera and preprocess it so that its ready for
# to be sent to the MVNC stick to run an inference on it.
def get_sample():
""" get a preprocessed frame to be pushed to the graph
"""
sample = gGstAppSink.get_property('last-sample')
if sample:
# a sample was available from the camera via the gstreamer app sink
buf = sample.get_buffer()
result, info = buf.map(Gst.MapFlags.READ)
preprocessed_image_buffer = preprocess(info.data)
buf.unmap(info)
del buf
del sample
return preprocessed_image_buffer
return None
# Read the graph file for the network from the filesystem.
def get_graph_from_disk():
"""
:return: the bytes that were read from disk which are the binary graph file contents
"""
with open(NETWORK_DIRECTORY + "graph", mode='rb') as file:
graph_blob = file.read()
return graph_blob
# preprocess the camera images to create images that are suitable for the
# network. Specifically resize to appropriate height and width
# and make sure the image format is correct. This is called by the input worker
# thread function prior to passing the image the MVNC API.
def preprocess(data):
""" preprocess a video frame
    input - raw frame bytes from the GStreamer app sink (BGR, capture size)
output - in the format required by the graph
"""
resize_width = NETWORK_IMAGE_WIDTH_4
resize_height = NETWORK_IMAGE_HEIGHT_4
buffer_data_type = numpy.dtype(numpy.uint8) # the buffer contains 8 bit unsigned ints that are the RGB Values of the image
    image_uint8_array = numpy.frombuffer(data, buffer_data_type, -1, 0) # get the input image into an array
    actual_stream_width = int(round((2*resize_width+1)/2)) # hack, rather get this from the app sink
    image_uint8_array = image_uint8_array.reshape(actual_stream_width, resize_height, 3)
    image_uint8_array = image_uint8_array[0:(resize_height-1), 0:(resize_width-1), 0:3] # crop to network input size
    image_float_array = image_uint8_array.astype('float32')
#Preprocess image changing the RGB pixel values to the values the network needs
# to do this we subtract the mean and multiply the std for each channel (R, G and B)
# these mean and std values come from the stat.txt file that must accompany the
# graph file for the network.
for i in range(3):
image_float_array[:,:,i] = (image_float_array[:,:,i] - gNetworkMean[i]) * gNetworkStd[i]
# Finally we return the values as Float16 rather than Float32 as that is what the network expects.
return image_float_array.astype(numpy.float16)
# post process the results from MVNC API to create a human
# readable string.
def postprocess(output):
""" postprocess an inference result
input - in the format produced by the graph
output - in a human readable format
"""
order = output.argsort()
last = len(gNetworkCategories)-1
text = gNetworkCategories[order[last-0]] + ' (' + '{0:.2f}'.format(output[order[last-0]]*100) + '%) '
# to get top 5 use this code
#for i in range(0, min(5, last+1)):
# text += gNetworkCategories[order[last-i]] + ' (' + '{0:.2f}'.format(output[order[last-i]]*100) + '%) '
return text
def put_output(userobj, out):
""" Method for receiving the (postprocessed) results
userobj - user object passed to the FathomExpress
out - output
"""
global gResultLabel
global gDrawAreaSink
gResultLabel.set_text("%s\n" % out)
# main entry point for the program
if __name__=="__main__":
Gdk.init([])
Gtk.init([])
Gst.init([])
# Load preprocessing data for network
# load means and stds from stat.txt
with open(NETWORK_STAT_TXT, 'r') as f:
gNetworkMean = f.readline().split()
gNetworkStd = f.readline().split()
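    # The stat.txt values are assumed to describe images scaled to [0, 1];
    # rescale them here so they can be applied directly to 8-bit pixel values.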
for i in range(3):
gNetworkMean[i] = 255 * float(gNetworkMean[i])
gNetworkStd[i] = 1.0 / (255.0 * float(gNetworkStd[i]))
# Load categories from categories.txt
gNetworkCategories = []
with open(NETWORK_CATEGORIES_TXT, 'r') as f:
for line in f:
cat = line.split('\n')[0]
if cat != 'classes':
gNetworkCategories.append(cat)
f.close()
fx.SetGlobalOption(fx.GlobalOption.LOGLEVEL, 3)
# For this program we will always use the first MVNC device.
ncs_names = fx.EnumerateDevices()
if (len(ncs_names) < 1):
print("Error - No NCS devices detected. Make sure your device is connected.")
quit()
# the camera source string for USB cameras. They will be /dev/video0, /dev/video1, etc.
# for this sample we will open the first camera (/dev/video0)
cam_src_str = "v4l2src device=/dev/video" + CAMERA_INDEX
app_launch_str = "\
videoscale ! video/x-raw, width=%s, height=%s ! \
videoconvert ! video/x-raw, format=%s ! \
appsink name=%s max-buffers=1 drop=true enable-last-sample=true" % (NETWORK_IMAGE_WIDTH_4, NETWORK_IMAGE_HEIGHT_4, NETWORK_IMAGE_FORMAT, GST_APP_NAME )
view_launch_str = "\
queue max-size-buffers=2 leaky=downstream ! \
%s name=%s" % (SINK_NAME, GST_VIEW_NAME)
# a gstreamer sink that is a gtk drawing area
# this is the camera preview display.
gDrawAreaSink = Gtk.DrawingArea()
gDrawAreaSink.set_double_buffered(True)
gDrawAreaSink.name = GST_VIEW_NAME
# build GStreamer launch string
source2tee = "%s ! tee name=t" % cam_src_str
tee2view = "t. ! %s" % view_launch_str
tee2app = "t. ! %s" % app_launch_str
launch = "%s %s %s" % (source2tee, tee2view, tee2app)
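    # The assembled launch string is roughly:
    #   v4l2src device=/dev/video0 ! tee name=t
    #   t. ! queue max-size-buffers=2 leaky=downstream ! xvimagesink name=view
    #   t. ! videoscale ! video/x-raw, width=228, height=228 ! videoconvert !
    #        video/x-raw, format=BGR ! appsink name=app max-buffers=1 drop=true enable-last-sample=true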
gstPipeline = Gst.parse_launch(launch)
gGstAppSink = gstPipeline.get_by_name(GST_APP_NAME)
# build GUI
window = Gtk.Window()
window.connect("delete-event", window_closed, gstPipeline)
window.set_default_size (640, 480)
window.set_title ("py_stream_infer")
box = Gtk.Box()
box.set_spacing(5)
box.set_orientation(Gtk.Orientation.VERTICAL)
window.add(box)
box.pack_start(gDrawAreaSink, True, True, 0)
gResultLabel = Gtk.Label()
box.pack_start(gResultLabel, False, True, 0)
window.show_all()
window.realize()
gstPipeline.get_by_name(GST_VIEW_NAME).set_window_handle(gDrawAreaSink.get_window().get_xid())
# Initialize the MVNC device
dev = fx.Device(ncs_names[0])
dev.OpenDevice()
gGraph = dev.AllocateGraph(get_graph_from_disk())
# Initialize input and output threads to pass images to the
    # MVNC device and to read results from the inferences made on those images.
gCallback = put_output
start_thread()
if gstPipeline.set_state(Gst.State.PLAYING) == Gst.StateChangeReturn.FAILURE:
gstPipeline.set_state(Gst.State.NULL)
else:
Gst.debug_bin_to_dot_file (gstPipeline,Gst.DebugGraphDetails.ALL,'playing-pipeline') # export GST_DEBUG_DUMP_DOT_DIR=/tmp/
Gtk.main()
Gst.debug_bin_to_dot_file (gstPipeline,Gst.DebugGraphDetails.ALL,'shutting-down-pipeline')
gstPipeline.set_state(Gst.State.NULL)
print("exiting main loop")
gGraph.DeallocateGraph()
dev.CloseDevice()
print("mvnc device closed")
stop_thread()
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
www2.hursley.ibm.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import glob
import math
import os, sys
import pickle, copy
import unittest
from decimal import *
import numbers
from test.test_support import (TestSkipped, run_unittest, run_doctest,
is_resource_enabled, _check_py3k_warnings)
import random
try:
import threading
except ImportError:
threading = None
# Useful Test Constant
Signals = tuple(getcontext().flags.keys())
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = (Clamped, Rounded, Inexact, Subnormal,
Underflow, Overflow, DivisionByZero, InvalidOperation)
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
def init():
global ORIGINAL_CONTEXT
ORIGINAL_CONTEXT = getcontext().copy()
DefaultTestContext = Context(
prec = 9,
rounding = ROUND_HALF_EVEN,
traps = dict.fromkeys(Signals, 0)
)
setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# list of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
#Map the test cases' error names to the actual errors
ErrorNames = {'clamped' : Clamped,
'conversion_syntax' : InvalidOperation,
'division_by_zero' : DivisionByZero,
'division_impossible' : InvalidOperation,
'division_undefined' : InvalidOperation,
'inexact' : Inexact,
'invalid_context' : InvalidOperation,
'invalid_operation' : InvalidOperation,
'overflow' : Overflow,
'rounded' : Rounded,
'subnormal' : Subnormal,
'underflow' : Underflow}
def Nonfunction(*args):
"""Doesn't do anything."""
return None
RoundingDict = {'ceiling' : ROUND_CEILING, #Maps test-case names to roundings.
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw
nameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor',
}
# The following functions return True/False rather than a Decimal instance
LOGICAL_FUNCTIONS = (
'is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum',
)
class DecimalTest(unittest.TestCase):
"""Class which tests the Decimal class against the test cases.
Changed for unittest.
"""
def setUp(self):
self.context = Context()
self.ignore_list = ['#']
# Basically, a # means return NaN InvalidOperation.
# Different from a sNaN in trim
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
def eval_file(self, file):
global skip_expected
if skip_expected:
raise TestSkipped
return
for line in open(file):
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except DecimalException, exception:
                #Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
return
def eval_line(self, s):
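        # A typical Cowlishaw test line looks like:
        #   addx001 add '1' '1' -> '2'
        # optionally followed by the names of expected conditions (flags).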
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
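        # Directive lines look like 'precision: 9' or 'rounding: half_even'.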
funct, value = map(lambda x: x.strip().lower(), s.split(':'))
if funct == 'rounding':
value = RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, Nonfunction)
funct(value)
def eval_equation(self, s):
#global DEFAULT_PRECISION
#print DEFAULT_PRECISION
if not TEST_ALL and random.random() < 0.90:
return
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print "Test ", id,
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in skipped_test_ids:
return
fname = nameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
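        # Operands containing spaces are quoted in the test files; the loop
        # below reassembles them from consecutive whitespace-split tokens.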
theirexceptions = [ErrorNames[x.lower()] for x in exceptions]
for exception in Signals:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals, e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = Decimal(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals, e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals, e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print "--", self.context
try:
result = str(funct(*vals))
if fname in LOGICAL_FUNCTIONS:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals, error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print "ERROR:", s
raise
myexceptions = self.getexceptions()
self.context.clear_flags()
with _check_py3k_warnings(quiet=True):
myexceptions.sort()
theirexceptions.sort()
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
return
def getexceptions(self):
return [e for e in Signals if self.context.flags[e]]
def change_precision(self, prec):
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
self.context.Emin = exp
def change_max_exponent(self, exp):
self.context.Emax = exp
def change_clamp(self, clamp):
self.context._clamp = clamp
# The following classes test the behaviour of Decimal according to PEP 327
class DecimalExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
def test_explicit_from_string(self):
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
#unicode strings should be permitted
self.assertEqual(str(Decimal(u'0E-017')), '0E-17')
self.assertEqual(str(Decimal(u'45')), '45')
self.assertEqual(str(Decimal(u'-Inf')), '-Infinity')
self.assertEqual(str(Decimal(u'NaN123')), 'NaN123')
def test_explicit_from_tuples(self):
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_Decimal(self):
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
self.assertNotEqual(id(d), id(e))
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
self.assertNotEqual(id(d), id(e))
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
self.assertNotEqual(id(d), id(e))
def test_explicit_context_create_decimal(self):
nc = copy.copy(getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.failUnless(isinstance(d, Decimal))
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
def test_unicode_digits(self):
test_values = {
u'\uff11': '1',
u'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
u'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class DecimalImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + None', globals())
def test_implicit_from_int(self):
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', globals())
def test_implicit_from_float(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', globals())
def test_implicit_from_Decimal(self):
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
with _check_py3k_warnings():
if 1 / 2 == 0:
# testing with classic division, so add __div__
oplist.append(('/', '__div__', '__rdiv__'))
else:
# testing with -Qnew, so add __truediv__
oplist.append(('/', '__truediv__', '__rtruediv__'))
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class DecimalFormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# check alignment
('<6', '123', '123   '),
('>6', '123', '   123'),
('^6', '123', ' 123  '),
('=+6', '123', '+  123'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
]
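# Each triple is exercised through the builtin format(); for instance, per the table above,
# format(Decimal('3.14159265'), '.4f') yields '3.1416' (decimal's default round-half-even).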
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class DecimalArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
for x, y in [(n, n), (n, i), (i, n), (n, f), (f, n),
(s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)]:
self.assert_(x != y)
self.assert_(not (x == y))
self.assert_(not (x < y))
self.assert_(not (x <= y))
self.assert_(not (x > y))
self.assert_(not (x >= y))
# The following are two functions used to test threading in the next class
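# thfunc2 lowers the precision of its own thread-local context to 18 before signalling
# thfunc1 via the 'synchro' event; thfunc1 then repeats the division and still gets the
# default 28-digit result, showing that context changes do not leak across threads.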
def thfunc1(cls):
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.synchro.wait()
test2 = d1/d3
cls.finish1.set()
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.3333333333333333333333333333'))
return
def thfunc2(cls):
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
return
class DecimalUseOfContextTest(unittest.TestCase):
'''Unit tests for Use of Context cases in Decimal.'''
try:
import threading
except ImportError:
threading = None
# Take care when executing this test from IDLE; there is an issue in threading
# that hangs IDLE and has not been tracked down.
def test_threading(self):
#Test the "threading isolation" of a Context.
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
return
if threading is None:
del test_threading
class DecimalUsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.failUnless(dc > da)
self.failUnless(dc >= da)
self.failUnless(da < dc)
self.failUnless(da <= dc)
self.failUnless(da == db)
self.failUnless(da != dc)
self.failUnless(da <= db)
self.failUnless(da >= db)
self.assertEqual(cmp(dc,da), 1)
self.assertEqual(cmp(da,dc), -1)
self.assertEqual(cmp(da,db), 0)
#a Decimal and an int
self.failUnless(dc > 23)
self.failUnless(23 < dc)
self.failUnless(dc == 45)
self.assertEqual(cmp(dc,23), 1)
self.assertEqual(cmp(23,dc), -1)
self.assertEqual(cmp(dc,45), 0)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = map(Decimal, xrange(100))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
# with None
with _check_py3k_warnings():
self.assertFalse(Decimal(1) < None)
self.assertTrue(Decimal(1) > None)
def test_copy_and_deepcopy_methods(self):
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
#just that it's hashable
hash(Decimal(23))
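# Integral values clustered around powers of two (2**m + n for small n) probe the
# word-size boundaries where hash(Decimal) is most likely to disagree with hash(int)
# if the implementation is wrong.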
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hash(value), hash(int(value)))
# the same hash as the corresponding int
self.assertEqual(hash(Decimal(23)), hash(23))
self.assertRaises(TypeError, hash, Decimal('NaN'))
self.assert_(hash(Decimal('Inf')))
self.assert_(hash(Decimal('-Inf')))
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
c = getcontext()
old_precision = c.prec
x = Decimal("123456789.1")
c.prec = 6
h1 = hash(x)
c.prec = 10
h2 = hash(x)
c.prec = 16
h3 = hash(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = old_precision
def test_min_and_max_methods(self):
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.failUnless(min(d1,d2) is d1)
self.failUnless(min(d2,d1) is d1)
self.failUnless(max(d1,d2) is d2)
self.failUnless(max(d2,d1) is d2)
#between Decimal and long
self.failUnless(min(d1,l2) is d1)
self.failUnless(min(l2,d1) is d1)
self.failUnless(max(l1,d2) is d2)
self.failUnless(max(d2,l1) is d2)
def test_as_nonzero(self):
#as false
self.failIf(Decimal(0))
#as true
self.failUnless(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
# result type of string methods should be str, not unicode
unicode_inputs = [u'123.4', u'0.5E2', u'Infinity', u'sNaN',
u'-0.0E100', u'-NaN001', u'-Inf']
for u in unicode_inputs:
d = Decimal(u)
self.assertEqual(type(str(d)), str)
self.assertEqual(type(repr(d)), str)
self.assertEqual(type(d.to_eng_string()), str)
def test_tonum_methods(self):
#Test float, int and long methods.
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#long
self.assertEqual(long(d1), 66)
self.assertEqual(long(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
def test_eval_round_trip(self):
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
#inf
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
#coefficient in infinity should be ignored
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_immutability_operations(self):
# Perform operations and check that they didn't change the operands' internal state.
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
return
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__div__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__long__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__nonzero__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdiv__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_subclassing(self):
# Different behaviours when subclassing Decimal
class MyDecimal(Decimal):
pass
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertTrue(type(d) is Decimal)
d = d1.max(d2)
self.assertTrue(type(d) is Decimal)
def test_implicit_context(self):
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class DecimalPythonAPItests(unittest.TestCase):
def test_abc(self):
self.assert_(issubclass(Decimal, numbers.Number))
self.assert_(not issubclass(Decimal, numbers.Real))
self.assert_(isinstance(Decimal(0), numbers.Number))
self.assert_(not isinstance(Decimal(0), numbers.Real))
def test_pickle(self):
d = Decimal('-3.141590000')
p = pickle.dumps(d)
e = pickle.loads(p)
self.assertEqual(d, e)
def test_int(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
self.assertRaises(ValueError, long, Decimal('-nan'))
self.assertRaises(ValueError, long, Decimal('snan'))
self.assertRaises(OverflowError, long, Decimal('inf'))
self.assertRaises(OverflowError, long, Decimal('-inf'))
def test_trunc(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
class ContextAPItests(unittest.TestCase):
def test_pickle(self):
c = Context()
e = pickle.loads(pickle.dumps(c))
for k in vars(c):
v1 = vars(c)[k]
v2 = vars(e)[k]
self.assertEqual(v1, v2)
def test_equality_with_other_types(self):
self.assert_(Decimal(10) in ['a', 1.0, Decimal(10), (1,2), {}])
self.assert_(Decimal(10) not in ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
class WithStatementTest(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assert_(orig_ctx is final_ctx, 'did not restore context correctly')
self.assert_(orig_ctx is not set_ctx, 'did not copy the context')
self.assert_(set_ctx is enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assert_(orig_ctx is final_ctx, 'did not restore context correctly')
self.assert_(set_ctx.prec == new_ctx.prec, 'did not set correct context')
self.assert_(new_ctx is not set_ctx, 'did not copy the context')
self.assert_(set_ctx is enter_ctx, '__enter__ returned wrong context')
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
context = Context(prec=9, Emin = -999999999, Emax = 999999999,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-1000000009")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
context._raise_error(flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
with _check_py3k_warnings(quiet=True):
expected_flags.sort()
# flags we actually got
new_flags = [k for k,v in context.flags.items() if v]
with _check_py3k_warnings(quiet=True):
new_flags.sort()
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init()
global TEST_ALL, DEBUG
TEST_ALL = arith or is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = [
DecimalExplicitConstructionTest,
DecimalImplicitConstructionTest,
DecimalArithmeticOperatorsTest,
DecimalFormatTest,
DecimalUseOfContextTest,
DecimalUsabilityTest,
DecimalPythonAPItests,
ContextAPItests,
DecimalTest,
WithStatementTest,
ContextFlags
]
else:
test_classes = [DecimalTest]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
# procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(DecimalTest, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
import decimal as DecimalModule
run_doctest(DecimalModule, verbose)
finally:
setcontext(ORIGINAL_CONTEXT)
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
dishes.py
|
import multiprocessing as mp
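# Producer/consumer sketch: washer() puts each dish on a JoinableQueue, while dryer()
# runs in a daemon process, consuming items and marking them with task_done() so that
# dish_queue.join() below blocks until every dish has been processed.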
def washer(dishes, output):
for dish in dishes:
print('Washing dish:', dish)
output.put(dish)
def dryer(input):
while True:
dish = input.get()
print('Drying dish:', dish)
input.task_done()
dish_queue = mp.JoinableQueue()
dryer_proc = mp.Process(target=dryer, args=(dish_queue,))
dryer_proc.daemon = True
dryer_proc.start()
dishes = ['salad', 'bread', 'main course', 'dessert']
washer(dishes, dish_queue)
dish_queue.join()
|
mininet_multicast_pox.py
|
#!/usr/bin/env python
from groupflow_shared import *
from mininet.net import *
from mininet.node import OVSSwitch, UserSwitch
from mininet.link import TCLink
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.node import Node, RemoteController
from scipy.stats import truncnorm
from numpy.random import randint, uniform
from subprocess import *
import sys
import signal
from time import sleep, time
from datetime import datetime
from multiprocessing import Process, Pipe
import numpy as np
ENABLE_FIXED_GROUP_SIZE = True
FIXED_GROUP_SIZE = 4
def mcastTest(topo, interactive = False, hosts = [], log_file_name = 'test_log.log', util_link_weight = 10, link_weight_type = 'linear', replacement_mode='none', pipe = None):
membership_mean = 0.1
membership_std_dev = 0.25
membership_avg_bound = float(len(hosts)) / 8.0
test_groups = []
test_group_launch_times = []
test_success = True
# Launch the external controller
pox_arguments = []
if 'periodic' in replacement_mode:
pox_arguments = ['pox.py', 'log', '--file=pox.log,w', 'openflow.discovery', '--link_timeout=30', 'openflow.keepalive',
'openflow.flow_tracker', '--query_interval=1', '--link_max_bw=19', '--link_cong_threshold=13', '--avg_smooth_factor=0.5', '--log_peak_usage=True',
'misc.benchmark_terminator', 'openflow.igmp_manager', 'misc.groupflow_event_tracer',
'openflow.groupflow', '--util_link_weight=' + str(util_link_weight), '--link_weight_type=' + link_weight_type, '--flow_replacement_mode=' + replacement_mode,
'--flow_replacement_interval=15',
'log.level', '--WARNING', '--openflow.flow_tracker=INFO']
else:
pox_arguments = ['pox.py', 'log', '--file=pox.log,w', 'openflow.discovery', '--link_timeout=30', 'openflow.keepalive',
'openflow.flow_tracker', '--query_interval=1', '--link_max_bw=19', '--link_cong_threshold=13', '--avg_smooth_factor=0.5', '--log_peak_usage=True',
'misc.benchmark_terminator', 'openflow.igmp_manager', 'misc.groupflow_event_tracer',
'openflow.groupflow', '--util_link_weight=' + str(util_link_weight), '--link_weight_type=' + link_weight_type, '--flow_replacement_mode=' + replacement_mode,
'--flow_replacement_interval=15',
'log.level', '--WARNING', '--openflow.flow_tracker=INFO']
print 'Launching external controller: ' + str(pox_arguments[0])
print 'Launch arguments:'
print ' '.join(pox_arguments)
with open(os.devnull, "w") as fnull:
pox_process = Popen(pox_arguments, stdout=fnull, stderr=fnull, shell=False, close_fds=True)
# Allow time for the log file to be generated
sleep(1)
# Determine the flow tracker log file
pox_log_file = open('./pox.log', 'r')
flow_log_path = None
event_log_path = None
got_flow_log_path = False
got_event_log_path = False
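# Keep reading pox.log until both file paths show up; this assumes the flow tracker and
# event tracer modules always print these two lines shortly after startup.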
while (not got_flow_log_path) or (not got_event_log_path):
pox_log = pox_log_file.readline()
if 'Writing flow tracker info to file:' in pox_log:
pox_log_split = pox_log.split()
flow_log_path = pox_log_split[-1]
got_flow_log_path = True
if 'Writing event trace info to file:' in pox_log:
pox_log_split = pox_log.split()
event_log_path = pox_log_split[-1]
got_event_log_path = True
print 'Got flow tracker log file: ' + str(flow_log_path)
print 'Got event trace log file: ' + str(event_log_path)
print 'Controller initialized'
pox_log_offset = pox_log_file.tell()
pox_log_file.close()
# External controller
net = Mininet(topo, controller=RemoteController, switch=OVSSwitch, link=TCLink, build=False, autoSetMacs=True)
# pox = RemoteController('pox', '127.0.0.1', 6633)
net.addController('pox', RemoteController, ip = '127.0.0.1', port = 6633)
net.start()
for switch_name in topo.get_switch_list():
#print switch_name + ' route add -host 127.0.0.1 dev lo'
net.get(switch_name).controlIntf = net.get(switch_name).intf('lo')
net.get(switch_name).cmd('route add -host 127.0.0.1 dev lo')
#print 'pox' + ' route add -host ' + net.get(switch_name).IP() + ' dev lo'
net.get('pox').cmd('route add -host ' + net.get(switch_name).IP() + ' dev lo')
#print net.get(switch_name).cmd('ifconfig')
topo.mcastConfig(net)
#print 'Controller network configuration:'
#print net.get('pox').cmd('ifconfig')
#print net.get('pox').cmd('route')
sleep_time = 8 + (float(len(hosts))/8)
print 'Waiting ' + str(sleep_time) + ' seconds to allow for controller topology discovery'
sleep(sleep_time) # Allow time for the controller to detect the topology
try:
if interactive:
CLI(net)
else:
mcast_group_last_octet = 1
mcast_port = 5010
rand_seed = int(time())
print 'Using random seed: ' + str(rand_seed)
np.random.seed(rand_seed)
host_join_probabilities = generate_group_membership_probabilities(hosts, membership_mean, membership_std_dev, membership_avg_bound)
print 'Host join probabilities: ' + ', '.join(str(p) for p in host_join_probabilities)
host_join_sum = sum(p[1] for p in host_join_probabilities)
print 'Measured mean join probability: ' + str(host_join_sum / len(host_join_probabilities))
print 'Predicted average group size: ' + str(host_join_sum)
i = 1
congested_switch_num_links = 0
while True:
print 'Generating multicast group #' + str(i)
# Choose a sending host using a uniform random distribution
sender_index = randint(0,len(hosts))
sender_host = hosts[sender_index]
receivers = []
if ENABLE_FIXED_GROUP_SIZE:
while len(receivers) < FIXED_GROUP_SIZE:
receiver_index = randint(0,len(hosts))
if receiver_index == sender_index:
continue
receivers.append(hosts[receiver_index])
receivers = list(set(receivers))
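# Duplicates are only removed after the loop, so the final group may end up smaller
# than FIXED_GROUP_SIZE when the same receiver index is drawn more than once.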
else:
# Choose a random number of receivers by comparing a uniform random variable
# against the previously generated group membership probabilities
for host_prob in host_join_probabilities:
p = uniform(0, 1)
if p <= host_prob[1]:
receivers.append(host_prob[0])
# Initialize the group
# Note - This method of group IP generation will need to be modified slightly to support more than
# 255 groups
mcast_ip = '224.1.1.{last_octet}'.format(last_octet = str(mcast_group_last_octet))
test_groups.append(StaticMulticastGroupDefinition(sender_host, receivers, mcast_ip, mcast_port, mcast_port + 1))
launch_time = time()
test_group_launch_times.append(launch_time)
print 'Launching multicast group #' + str(i) + ' at time: ' + str(launch_time)
print 'Sender: ' + str(sender_host)
print 'Receivers: ' + str(receivers)
test_groups[-1].launch_mcast_applications(net)
mcast_group_last_octet = mcast_group_last_octet + 1
mcast_port = mcast_port + 2
i += 1
wait_time = 5 + uniform(0, 5)
# Read from the log file to determine if a link has become overloaded, and cease generating new groups if so
print 'Check for congested link...'
congested_link = False
pox_log_file = open('./pox.log', 'r')
pox_log_file.seek(pox_log_offset)
done_reading = False
while not done_reading:
line = pox_log_file.readline()
if 'Network peak link throughput (Mbps):' in line:
line_split = line.split(' ')
print 'Peak Usage (Mbps): ' + line_split[-1],
if 'Network avg link throughput (Mbps):' in line:
line_split = line.split(' ')
print 'Mean Usage (Mbps): ' + line_split[-1],
if 'FlowStats: Fully utilized link detected!' in line:
line_split = line.split(' ')
congested_link = True
done_reading = True
if 'Multicast topology changed, recalculating all paths' in line or 'Path could not be determined for receiver' in line:
print 'ERROR: Network topology changed unexpectedly.'
print line
test_success = False
done_reading = True
if time() - launch_time > wait_time:
done_reading = True
pox_log_offset = pox_log_file.tell()
pox_log_file.close()
if congested_link:
print 'Detected fully utilized link, terminating simulation.'
break
if not test_success:
print 'Detected network connectivity error, terminating simulation.'
break
else:
print 'No congestion detected.'
recv_packets = 0
lost_packets = 0
print 'Terminating network applications'
for group in test_groups:
group.terminate_mcast_applications()
print 'Terminating controller'
pox_process.send_signal(signal.SIGINT)
sleep(1)
print 'Waiting for network application termination...'
for group in test_groups:
group.wait_for_application_termination()
print 'Network applications terminated'
print 'Waiting for controller termination...'
pox_process.send_signal(signal.SIGKILL)
pox_process.wait()
print 'Controller terminated'
pox_process = None
net.stop()
if not interactive and test_success:
write_final_stats_log(log_file_name, flow_log_path, event_log_path, membership_mean, membership_std_dev, membership_avg_bound, test_groups, test_group_launch_times, topo)
if not test_success:
call('rm -rfv ' + str(flow_log_path), shell=True)
call('rm -rfv ' + str(event_log_path), shell=True)
except BaseException as e:
print str(e)
test_success = False
if pipe is not None:
pipe.send(test_success)
pipe.close()
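# Exposing the topology here lets Mininet's --custom flag load it by name, e.g.:
#   sudo mn --custom mininet_multicast_pox.py --topo mcast_test
# (illustrative invocation; adjust paths and privileges to your setup)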
topos = { 'mcast_test': ( lambda: MulticastTestTopo() ) }
def print_usage_text():
print 'GroupFlow Multicast Testing with Mininet'
print 'Usage:'
print '1) No arguments:'
print '> mininet_multicast_pox'
print 'If no arguments are provided, the script will launch a hard-coded test topology with Mininet in interactive mode.'
print ''
print '2) Custom topology:'
print '> mininet_multicast_pox <topology_path>'
print 'topology_path: If a single argument is given, the argument will be interpreted as a path to a BRITE topology. Otherwise, this functions identically to the no argument mode.'
print ''
print '3) Automated benchmarking:'
print '> mininet_multicast_pox <topology_path> <iterations_to_run> <log_file_prefix> <index_of_first_log_file> <parameter_sets (number is variable and unlimited)>'
print 'Parameter sets have the form: flow_replacement_mode,link_weight_type,util_link_weight'
print 'The topology path "manhattan" is currently hardcoded to generate a 20 Mbps, 5x5 Manhattan grid topology'
if __name__ == '__main__':
setLogLevel( 'info' )
if len(sys.argv) >= 2:
if '-h' in str(sys.argv[1]) or 'help' in str(sys.argv[1]):
print_usage_text()
sys.exit()
if len(sys.argv) >= 6:
# Automated simulations - Differing link usage weights in Groupflow Module
log_prefix = sys.argv[3]
num_iterations = int(sys.argv[2])
first_index = int(sys.argv[4])
util_params = []
for param_index in range(5, len(sys.argv)):
param_split = sys.argv[param_index].split(',')
util_params.append((param_split[0], param_split[1], float(param_split[2])))
topo = None
if 'manhattan' in sys.argv[1]:
print 'Generating Manhattan Grid Topology'
topo = ManhattanGridTopo(5, 5, 20, 1, True)
else:
print 'Generating BRITE Specified Topology'
topo = BriteTopo(sys.argv[1])
hosts = topo.get_host_list()
start_time = time()
num_success = 0
num_failure = 0
print 'Simulations started at: ' + str(datetime.now())
for i in range(0,num_iterations):
for util_param in util_params:
test_success = False
while not test_success:
parent_pipe, child_pipe = Pipe()
p = Process(target=mcastTest, args=(topo, False, hosts, log_prefix + '_' + ','.join([util_param[0], util_param[1], str(util_param[2])]) + '_' + str(i + first_index) + '.log', util_param[2], util_param[1], util_param[0], child_pipe))
sim_start_time = time()
p.start()
p.join()
sim_end_time = time()
# Make extra sure the network terminated cleanly
call(['python', 'kill_running_test.py'])
test_success = parent_pipe.recv()
parent_pipe.close()
print 'Test Success: ' + str(test_success)
if test_success:
num_success += 1
else:
num_failure += 1
print 'Simulation ' + str(i+1) + '_' + ','.join([util_param[0], util_param[1], str(util_param[2])]) + ' completed at: ' + str(datetime.now()) + ' (runtime: ' + str(sim_end_time - sim_start_time) + ' seconds)'
end_time = time()
print ' '
print 'Simulations completed at: ' + str(datetime.now())
print 'Total runtime: ' + str(end_time - start_time) + ' seconds'
print 'Average runtime per sim: ' + str((end_time - start_time) / (num_iterations * len(util_params))) + ' seconds'
print 'Number of failed sims: ' + str(num_failure)
print 'Number of successful sims: ' + str(num_success)
elif len(sys.argv) >= 2:
# Interactive mode - configures POX and multicast routes, but no automatic traffic generation
print 'Launching BRITE defined multicast test topology'
topo = BriteTopo(sys.argv[1])
hosts = topo.get_host_list()
mcastTest(topo, True, hosts)
else:
# Interactive mode with barebones topology
print 'Launching default multicast test topology'
topo = MulticastTestTopo()
hosts = topo.get_host_list()
mcastTest(topo, True, hosts)
|
test_c10d_nccl.py
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
import unittest
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_TSAN,
)
import test_c10d_common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook, AbstractProcessGroupWrapperTest
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
def test_common_errors(self):
if torch.cuda.device_count() == 0:
raise unittest.SkipTest("No GPUs available, skipping test")
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
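# Env temporarily replaces os.environ with exactly the given variables (clear=True),
# restoring the previous environment when the with-block exits.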
class Env(object):
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?world_size={}".format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous("env://?rank={}".format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
@requires_nccl()
@retry_on_connect_failures
def test_default_store_timeout_nccl(self):
if torch.cuda.device_count() == 0:
raise unittest.SkipTest("No GPUs available, skipping test")
self._test_default_store_timeout("nccl")
@requires_gloo()
@requires_nccl()
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class ProcessGroupNCCLWrapperTest(AbstractProcessGroupWrapperTest):
def setUp(self):
self.num_gpus = torch.cuda.device_count()
if self.num_gpus < 2:
raise unittest.SkipTest("NCCL test requires 2+ GPUs")
super(AbstractProcessGroupWrapperTest, self).setUp()
self._spawn_processes()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, so tests that use
# NCCL_BLOCKING_WAIT still exercise blocking wait as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
@property
def world_size(self) -> int:
return 2
def _create_wrapper_pg(self, with_new_group=False, timeout=10.0):
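# Returns either a group created with c10d.new_group() or a raw ProcessGroupNCCL
# wrapped via _create_process_group_wrapper; the wrapper performs the collective
# consistency checks exercised by the mismatch tests below.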
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=timeout)
)
if with_new_group:
pg = c10d.new_group(backend="nccl", timeout=timedelta(seconds=timeout))
else:
_pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size, timeout=timedelta(seconds=timeout))
pg = c10d._create_process_group_wrapper(
_pg,
"unused",
store,
self.rank,
self.world_size,
timeout=timeout,
)
return pg
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_collective_hang(self):
pg = self._create_wrapper_pg(timeout=2.0)
self._test_collective_hang(pg)
# NOTE: these tests are separated by debug level instead of combined into
# one due to https://github.com/pytorch/pytorch/issues/55967, they can be
# combined after that is resolved.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_collectives_op_mismatch_debug_mode(self):
pg = self._create_wrapper_pg(with_new_group=True)
self._test_collectives_op_mismatch(pg, use_cuda=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_collectives_op_mismatch(self):
pg = self._create_wrapper_pg(with_new_group=False)
self._test_collectives_op_mismatch(pg, use_cuda=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_collective_shape_mismatch_debug_mode(self):
pg = self._create_wrapper_pg(with_new_group=True)
self._test_collective_shape_mismatch(pg, use_cuda=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_collective_shape_mismatch(self):
pg = self._create_wrapper_pg(with_new_group=False)
self._test_collective_shape_mismatch(pg, use_cuda=True)
class ProcessGroupNCCLNoGPUTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
self.num_gpus = torch.cuda.device_count()
if self.num_gpus > 0:
raise unittest.SkipTest("GPUs are available, skipping test")
def tearDown(self):
pass
@requires_nccl()
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class ProcessGroupNCCLTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
self.num_gpus = torch.cuda.device_count()
if self.num_gpus < 2:
raise unittest.SkipTest("NCCL test requires 2+ GPUs")
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, so tests that use
# NCCL_BLOCKING_WAIT still exercise blocking wait as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
def tearDown(self):
pass
@requires_nccl()
def test_empty_tensors(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
xs = [torch.cuda.FloatTensor([])]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.allreduce(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
ys = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.cuda.FloatTensor([])]
xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@requires_nccl()
def test_broadcast_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
broadcast(tensors, self.rank, rt)
for i in range(self.num_gpus):
self.assertEqual(tensors[i], tensors[rt])
@requires_nccl()
def test_allreduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[i],
)
# Product
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i]
)
# Min
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i])
# Max
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
self.assertEqual(torch.tensor([self.num_gpus]), tensors[i])
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
allreduce(tensors, op)
@requires_nccl()
def test_reduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce(xs, rootRank, rootTensor, op=None):
opts = c10d.ReduceOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
if op:
opts.reduceOp = op
work = pg.reduce(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
reduce(tensors, self.rank, rt)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[rt],
)
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
@requires_nccl()
def test_allgather_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather(output_ts, input_ts):
work = pg.allgather(output_ts, input_ts)
work.wait()
tensors = []
output_ts = [[] for _ in range(self.num_gpus)]
for idx, ls in enumerate(output_ts):
for _ in range(self.world_size * self.num_gpus):
ls.append(torch.tensor([0]).cuda(idx))
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
allgather(output_ts, tensors)
# Verification
for device_ts in output_ts:
for s_idx, t in enumerate(device_ts):
self.assertEqual(torch.tensor([s_idx]), t)
@requires_nccl()
def test_allgather_base_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# allgather_base is GPU number agnostic.
# Each rank contributes one tensor regardless of GPU count
tensor = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(device_id)
allgather_base(output_t, tensor)
# Verification
self.assertEqual(torch.arange(self.world_size), output_t)
@requires_nccl()
def test_allgather_base_basics(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# anticipate an error
with self.assertRaisesRegex(RuntimeError, "output tensor size must be equal to world_size times input tensor size"):
tensor = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda(device_id)
# fails the check because output_t is not correctly sized
allgather_base(output_t, tensor)
# anticipate an error
with self.assertRaisesRegex(RuntimeError, "output tensor must have the same type as input tensor"):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(device_id)
# fails the check because the dtype is different
allgather_base(output_t, tensor)
@requires_nccl()
def test_reduce_scatter_base_basics(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# anticipate an error
with self.assertRaisesRegex(RuntimeError, "input tensor must be the same size as output size times world size"):
input_t = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(device_id)
# fails the check because output_t is not correctly sized
reduce_scatter_base(output_t, input_t)
# anticipate an error
with self.assertRaisesRegex(RuntimeError, "input tensor must be the same type as the outut tensor."):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(device_id)
# fails the check because the dtype is different
reduce_scatter_base(output_t, tensor)
@requires_nccl()
def test_reduce_scatter_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter(outputs, input_lists, op):
opts = c10d.ReduceScatterOptions()
opts.reduceOp = op
work = pg.reduce_scatter(outputs, input_lists, opts)
work.wait()
virtual_rank = self.rank * self.world_size
virtual_world_size = self.num_gpus * self.world_size
output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)]
#           0              1              2
#   0   [0..11]        [1..12]
#   1   [3..14]
#   2
#   3
# Sum
tensor_lists = [
[
torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
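# tensor_lists[i][j] holds rank*num_gpus + i + j, so each GPU contributes a shifted
# arithmetic sequence; the expected sum/min/max/product values below follow from that layout.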
reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
expected = torch.tensor(
[
float(self.num_gpus * (self.num_gpus - 1) / 2)
+ (virtual_rank + i) * virtual_world_size
]
)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
# Min
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
expected = torch.tensor([self.rank * self.world_size + i])
self.assertEqual(expected, output[i])
# Max
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
expected = torch.tensor(
[self.rank * self.world_size + i + virtual_world_size - 1]
)
self.assertEqual(expected, output[i])
# Product
tensor_lists = [
[
torch.tensor(
[(self.rank * self.num_gpus + i + j) % virtual_world_size + 1]
).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
expected = torch.tensor([float(math.factorial(virtual_world_size))])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
@requires_nccl()
def test_reduce_scatter_base_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# reduce_scatter_base is GPU number agnostic.
# Each rank contributes one tensor regardless of GPU count
output_t = torch.empty([1]).cuda(device_id)
tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(device_id)
reduce_scatter_base(output_t, tensor)
# Verification
self.assertEqual(output_t[0], self.rank * self.world_size)
@requires_nccl()
def test_barrier(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors):
opts = c10d.AllreduceOptions()
work = pg.allreduce(tensors, opts)
return work
# Launch allreduce collectives operating on 2, 3, ..., self.num_gpus GPUs
tensors_list = [[] for _ in range(2, self.num_gpus + 1)]
for i in range(2, self.num_gpus + 1):
for j in range(i):
tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j))
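# tensors_list[i-2] holds the i tensors [1, 2, ..., i], with tensor j+1 placed on GPU j.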
works = []
for tensors in tensors_list:
work = allreduce(tensors)
works.append(work)
# Barrier will ensure that all previous work is completed
pg.barrier().wait()
for i in range(2, self.num_gpus + 1):
for j in range(i):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j]
)
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class DistributedDataParallelTest(test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, so tests that use
# NCCL_BLOCKING_WAIT still exercise blocking wait as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
if sys.platform == "win32":
self._spawn_processes()
else:
self._fork_processes()
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(ValueError, "device_ids can only be None or contain a single element."):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(ValueError, "device_ids can only be None or contain a single element."):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(ValueError, "device_ids can only be None or contain a single element."):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
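# Concretely: fp16 can represent at most 65504, so summing 2 ** 15 from each
# of the 2 ranks (2 ** 16 = 65536) would overflow to inf, while pre-dividing
# by world_size keeps the reduced gradient at 2 ** 15.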
input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on CPU modules
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ForwardReturnValueModule(nn.Module):
def __init__(self):
super(ForwardReturnValueModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, fn):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# The first softmax does NOT include fc3 in its autograd graph
# whereas the second softmax DOES. If we pass only the first
# tensor we see in the output to the reducer, it marks the
# gradient for fc3 as ready (because it doesn't show up). If
# downstream uses of this return value choose to differentiate
# against the second output tensor, it would still receive a
# gradient and a callback for this tensor, resulting in a crash.
return fn(
F.softmax(x, dim=1),
F.softmax(self.fc3(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
ForwardReturnValueModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Always run "backward" to ensure the reducer is called by autograd.
# If we don't correctly capture the output tensors from the return value,
# the reducer won't see a hook for the unused parameter and will throw an error.
# The correct capture is what we're testing in this function.
def test(box, unbox):
output = model(input, fn=box)
loss = criterion(unbox(output), target)
loss.backward()
# Test with identity return value
test(
box=lambda x, y: (x, y),
unbox=lambda obj: obj[1],
)
# Test with list return value
test(
box=lambda x, y: ["foo", x, "bar", y],
unbox=lambda obj: obj[3],
)
# Test with tuple return value
test(
box=lambda x, y: ("foo", x, "bar", y),
unbox=lambda obj: obj[3],
)
# Test with dict return value
test(
box=lambda x, y: {"foo": "bar", "a": x, "b": y},
unbox=lambda obj: obj["b"],
)
# Test with list with dict return value
test(
box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
unbox=lambda obj: obj[2]["b"],
)
# Test with dict with list return value
test(
box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
unbox=lambda obj: obj["list"][3],
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value(self):
self._test_arbitrary_forward_return_value()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value_grad_is_view(self):
self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_with_lazy_parameters(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Modules with uninitialized parameters"
):
DistributedDataParallel(
torch.nn.LazyLinear(10), process_group=process_group
)
def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on CPU modules
once DistributedDataParallel supports them.
"""
torch.cuda.set_device(self.rank)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}"
)
process_group = c10d.distributed_c10d._get_default_group()
class FindUnusedParametersModule(nn.Module):
def __init__(self):
super(FindUnusedParametersModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# Return the fc3 module so that the caller can invoke it
# outside of the forward function. While this is bad practice,
# we can use it to trigger a reducer error.
return (F.softmax(x, dim=1), self.fc3)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
ddp_model = None
def test_find_unused_parameters(
find_unused_parameters, test_default=False, gradient_as_bucket_view=False
):
if test_default:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
else:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
gradient_as_bucket_view=gradient_as_bucket_view,
)
nonlocal ddp_model
ddp_model = model
output, fc3 = model(input)
output = fc3(output)
loss = criterion(output, target)
loss.backward()
# First test that finding unused params under these conditions correctly
# marks the parameter corresponding to fc3 as unused, since it was not used
# in the DDP forward pass. Note that the above usage is not a recommended
# way of using DDP: if a module is wrapped within DDP, it should either
# stay unused or be used within the DDP module itself.
test_find_unused_parameters(
True, gradient_as_bucket_view=gradient_as_bucket_view,
)
dist.barrier(process_group)
# if find_unused_parameters=False, this would normally result in an
# error, but since fc3 does get used in a way DDP does not know about,
# autograd hooks are indeed called as expected.
try:
test_find_unused_parameters(
False, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# Test find_unused_parameters defaults to False
try:
test_find_unused_parameters(
True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967
# is resolved.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_debug_detail(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_debug_info(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_debug_off(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on CPU modules
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class MultipleOutputModule(nn.Module):
def __init__(self):
super(MultipleOutputModule, self).__init__()
def define_module():
return nn.Sequential(
nn.Linear(2, 10, bias=False),
nn.ReLU(),
nn.Linear(10, 4, bias=False),
nn.ReLU(),
)
self.module0 = define_module()
self.module1 = define_module()
def forward(self, x):
return (
F.softmax(self.module0(x), dim=1),
F.softmax(self.module1(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
MultipleOutputModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Compute loss and gradients for both outputs
output1, output2 = model(input)
loss1 = criterion(output1, target)
loss1.backward()
loss2 = criterion(output2, target)
loss2.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward(self):
self._test_multiple_outputs_multiple_backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward_grad_is_view(self):
self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on CPU modules
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoGradModule(nn.Module):
def __init__(self):
super(NoGradModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has their gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have their gradient set.
check_no_grads()
def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):
# This is NOT the recommended way to implement accumulating grads, but
# we would like to make sure DDP does not mess with the underlying
# module.
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = self.world_size
model, ddp_model, input, target = self._prepare_single_device_module(
process_group, devices, devices, global_batch_size, gradient_as_bucket_view
)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure accumulate grads works with no_grad
with torch.no_grad():
ddp_model.train()
ddp_model.module(input)
# Check two model parameters over 4 iterations.
# Use 4 iterations because we alternate between reducing and
# not reducing and want to make sure we switch both ways.
for iteration in range(4):
step_model(model, input, target)
if iteration % 2 == 0:
# Skip gradients sync without calling prepare_for_backward
step_model(
ddp_model.module,
input[self.rank: (self.rank + 1)],
target[self.rank: (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertNotEqual(i.grad, j.grad)
else:
step_model(
ddp_model,
input[self.rank: (self.rank + 1)],
target[self.rank: (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(i.grad, j.grad)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module(self):
self._test_accumulate_gradients_module()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module_with_grad_is_view(self):
self._test_accumulate_gradients_module(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_failure_recovery(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# need to create a separate file for the recovered FileStore, because
# the original one will be deleted when the first FileStore is destroyed.
recovery_filename = self.file_name + "_recovery"
if self.rank == 0:
# the file will be deleted by the recovered FileStore
open(recovery_filename, "w").close()
# not necessary to run barrier here, as DDP will synchronize
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = TestModel().float().to(device_id)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
del ddp
del process_group
del store # this will delete self.file_name
store = c10d.FileStore(recovery_filename, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_default_pg(self):
dist.init_process_group(
"nccl",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
default_pg = c10d.distributed_c10d._get_default_group()
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = local_batch_size * self.world_size
# Carry out some trials with small buckets and some with big buckets.
bucketsizes = (0.000001, 25)
# Tuples of lists. Each list describes per-layer characteristics for one trial.
layer_formats = (
[torch.contiguous_format] * 4,
[torch.channels_last] * 2 + [torch.contiguous_format] * 2,
[torch.channels_last] * 4,
)
layer_dtypes = (
[torch.float] * 4,
[torch.float] * 2 + [torch.half] * 2,
[torch.half] * 4,
)
input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
input = torch.randn(
(global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
)
target = torch.randn(
(global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
)
local_batch_start = self.rank * local_batch_size
local_batch_end = (self.rank + 1) * local_batch_size
# Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
# argument. The following makes sure the initial bucket also complies.
@contextmanager
def first_bucket_size(ddp_bucket_mb):
old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
try:
yield
finally:
dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
for formats, dtypes, bucketsize in product(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
model_msg = (
"rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
self.rank, formats, dtypes, bucketsize
)
)
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
copy.deepcopy(m),
device_ids=replica_devices,
process_group=process_group,
bucket_cap_mb=bucketsize,
)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print(
"Caught exception during model creation for " + model_msg,
flush=True,
)
raise
# 3 iters: First iter creates grads, second iter retests after rebucketing,
# third iter tries zeroed grads.
for it in range(3):
iter_msg = "iter = {} ".format(it) + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(
m_ddp(input[local_batch_start:local_batch_end]).float(),
target[local_batch_start:local_batch_end],
).backward()
for i, ((layer_name, m_child), m_ddp_child) in enumerate(
zip(m.named_children(), m_ddp.module.children())
):
named_msg = layer_name + ".weight" + " " + iter_msg
self.assertTrue(
m_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
self.assertTrue(
m_ddp_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
for j, ((param_name, p), p_ddp) in enumerate(
zip(
m_child.named_parameters(),
m_ddp_child.parameters(),
)
):
named_msg = (
layer_name + "." + param_name + " " + iter_msg
)
self.assertEqual(
p.grad, p_ddp.grad, rtol=tol, atol=tol
)
opt.step()
opt_ddp.step()
if it == 0:
for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
p.grad = None
p_ddp.grad = None
else:
m.zero_grad()
m_ddp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print(
"Caught exception during iterations at " + named_msg,
flush=True,
)
raise
@requires_nccl()
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_grad_layout_1devicemodule_1replicaperprocess(self):
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
# Tells DDP to use just one device.
replica_devices = [dev0]
# Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
layer_devs = dev0
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_grad_layout_2devicemodule(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
dev0 = torch.device("cuda:" + str(int_devices[0]))
dev1 = torch.device("cuda:" + str(int_devices[1]))
# DDP's default behavior for a multi-device module is "don't replicate."
replica_devices = None
# Tells _test_grad_layout to construct this process's ConvNet on 2 devices, with 2 layers on each device.
layer_devs = [dev0] * 2 + [dev1] * 2
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_param_layout_mismatch_error(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
layer_devs = dev0
layer_formats = (
[torch.contiguous_format] * 4
if self.rank == 0
else [torch.channels_last] * 4
)
layer_dtypes = [torch.float] * 4
m = ConvNet(layer_devs, layer_formats, layer_dtypes)
if self.rank == 0:
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
else:
with self.assertRaisesRegex(
RuntimeError,
".* appears not to match strides of the same param in process 0",
):
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None, static_graph=False
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
if static_graph:
gpu_model._set_static_graph()
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_nccl(self):
"""
This unit test verifies whether the Future object is passed properly when using the NCCL backend.
The hook callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# check whether the grads are equal to what simple_hook's then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
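# A hook of roughly this shape would produce the 2 * torch.ones(2, 2) grads
# verified above (illustrative sketch only, not the actual _simple_hook
# defined in test_c10d_common):
#
#     def illustrative_hook(state, bucket: dist.GradBucket) -> torch.futures.Future:
#         fut = torch.futures.Future()
#         fut.set_result([torch.ones_like(bucket.get_tensor()) * 2])
#         return fut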
def _test_ddp_comm_hook_allreduce_hook_nccl(self, gradient_as_bucket_view=False, static_graph=False):
"""
This unit test verifies whether a DDP communication hook that just calls
allreduce gives the same result as when no hook is registered.
Without the then callback, the future_value in reducer is no longer
a PyObject, and this unit test verifies future_value is properly checked.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_hook(state: object, bucket: dist.GradBucket) -> torch._C.Future:
tensors = [bucket.get_tensor() / self.world_size]
return process_group.allreduce(tensors).get_future()
# Get GPU model with allreduce_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_hook, gradient_as_bucket_view, static_graph
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether default Python DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result as when no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# For these default DDP comm hooks, the only state is process group.
state = process_group
for hook in [default.allreduce_hook, default.fp16_compress_hook]:
# Get GPU model with the hook registered.
# The first arg 'process_group' is used for initializing the test environment,
# so it cannot be replaced by 'state', although they have the same value.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the FP16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [(powerSGD.powerSGD_hook, powerSGD_state), (default.allreduce_hook, process_group)]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.fp16_compress_wrapper(hook),
gradient_as_bucket_view,
state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether Python DDP communication hook POWER_SGD
can give the same result as when no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start in product([True, False], [True, False]):
state = powerSGD.PowerSGDState(
process_group=process_group,
matrix_approximation_rank=1,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result as when no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for comm_hook_type in [
dist.BuiltinCommHookType.ALLREDUCE,
dist.BuiltinCommHookType.FP16_COMPRESS,
]:
# Get GPU model with the built-in communication hook.
gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
process_group, comm_hook_type, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl(self):
self._test_ddp_comm_hook_allreduce_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl(self):
self._test_default_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_nccl(self):
self._test_fp16_compress_wrapper()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl(self):
self._test_builtin_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl(self):
self._test_powerSGD_ddp_comm_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl_is_view(self):
self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_is_view(self):
self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
"""
This unit test verifies whether a DDP communication hook that calls allreduce and then
multiplies the result by ten and divides by two gives the expected result.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_with_then_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future:
tensors = [bucket.get_tensor() / self.world_size]
fut = process_group.allreduce(tensors).get_future()
def mult(fut):
# Multiply the result by 10.
return [10 * t for t in fut.value()]
def div(fut):
# Divide the result by 2.
return [0.5 * t for t in fut.value()]
return fut.then(mult).then(div)
# Get GPU model with allreduce_with_then_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_with_then_hook
)
# check whether the grads are equal to what allreduce returns multiplied by 5
# (i.e. * 10 then / 2, so 0.25 * 5 = 1.25 * torch.ones(2, 2)).
# without the comm_hook, the result would still be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
class AcceptsParam(torch.nn.Module):
def __init__(self, p, factor):
super().__init__()
self.a = p
self.f = factor
def forward(self, input):
return input + self.a * self.f
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
size = 2048 * 2048
dev = self.rank
world = self.world_size
p = torch.nn.Parameter(torch.randn(size, requires_grad=True))
for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
m = torch.nn.Sequential(self.AcceptsParam(p, dev + 1),
self.AcceptsParam(p, dev + 1)).cuda(dev)
m = torch.nn.parallel.DistributedDataParallel(m,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[dev],
process_group=process_group)
for i in range(3):
m.zero_grad(set_to_none=try_set_to_none)
m(1).sum().backward()
# Each param value is multiplied by "rank + 1" twice in forward, so the grad
# values produced by a particular rank should be 2. * (rank + 1).
# Summing these over ranks and dividing by world size gives the expected result:
analytic = torch.full_like(p, 2. * (world * (world + 1.) / 2.) / world, device=dev)
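# e.g. with world_size = 2: rank 0 contributes grads of 2.0, rank 1 of 4.0,
# and the averaged grad is 3.0 (= world_size + 1) in every element.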
for name, p in m.named_parameters():
self.assertEqual(p.grad, analytic, "mismatch at " + name + ".grad for " +
"set_to_none = {}, use_bucket_view = {}".format(try_set_to_none,
use_bucket_view))
# A list of tests for ddp with activation checkpointing
# when gradient_as_bucket_view=True, False.
# Most of the tests are adapted from
# https://github.com/facebookresearch/fairscale/blob/master/tests/nn/pipe/test_checkpoint_ddp.py
class CheckpointOnceModule(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.Linear(20, 20)
self.l2 = nn.Linear(20, 20)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
return x
class CheckpointTwiceModule(CheckpointOnceModule):
def __init__(self):
super().__init__()
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
x = checkpoint(self.l2, x)
return x
def _prepare_dummy_data(self):
ddp_bs = 16
bs = ddp_bs * self.world_size
input = torch.rand((bs, 20), device="cuda", requires_grad=True)
target = torch.randn((bs, 20), device="cuda")
offset = self.rank * ddp_bs
ddp_input = input[offset: offset + ddp_bs]
ddp_target = target[offset: offset + ddp_bs]
return input, ddp_input, target, ddp_target
def _train_model(self, model, input_var, target, loss, run_checkpoint=False):
model.train()
if run_checkpoint:
output = checkpoint(model, input_var)
else:
output = model(input_var)
l = loss(output, target)
l.backward()
def _test_ddp_checkpointing(
self,
input_model,
process_group,
use_bucket_view,
find_unused_parameters=False,
static_graph=False,
run_checkpoint=False
):
# to reproduce the same training results
torch.cuda.set_device(self.rank)
torch.manual_seed(31415)
model = copy.deepcopy(input_model).cuda()
ddp_model = copy.deepcopy(input_model).cuda()
ddp_model = nn.parallel.DistributedDataParallel(
ddp_model,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[self.rank],
process_group=process_group,
find_unused_parameters=find_unused_parameters
)
if static_graph:
ddp_model._set_static_graph()
self.assertEqual(ddp_model._get_ddp_logging_data().get("static_graph", 0), static_graph)
input, ddp_input, target, ddp_target = self._prepare_dummy_data()
loss = nn.MSELoss()
for i in range(5):
model.zero_grad(set_to_none=False)
ddp_model.zero_grad(set_to_none=False)
self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint)
self._train_model(ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertTrue(i.grad is not None)
self.assertTrue(j.grad is not None)
self.assertEqual(i.grad, j.grad)
# DDP works as expected when a layer is checkpointed only once
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_once(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view, static_graph in product((False, True), (False, True)):
self._test_ddp_checkpointing(self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph)
# DDP will fail when there are unused_parameters in the model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_unused_params(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
static_graph=False)
# test passes when static_graph is true
model = self._test_ddp_checkpointing(self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
static_graph=True)
# DDP will fail when the same layer is checkpointed twice
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_twice(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(self.CheckpointTwiceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False)
model = self._test_ddp_checkpointing(self.CheckpointTwiceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=True)
# DDP works as expected if there is weight sharing among layers
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
torch.cuda.set_device(self.rank)
for use_bucket_view, static_graph in product((False, True), (False, True)):
torch.manual_seed(31415)
l1 = nn.Linear(20, 20)
l2 = nn.Linear(20, 20)
l1.weight = l2.weight
model = nn.Sequential(l1, l2)
self._test_ddp_checkpointing(model,
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
run_checkpoint=True)
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super(NcclErrorHandlingTest, self).setUp()
# Need to skip return code checking for these tests since the child
# processes don't exit cleanly.
self.skip_return_code_checks = [
self.test_nccl_errors_blocking_abort.__wrapped__,
self.test_nccl_errors_blocking_sigkill.__wrapped__,
self.test_nccl_errors_blocking_sigterm.__wrapped__,
self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
]
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._fork_processes()
def tearDown(self):
super(NcclErrorHandlingTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 3
@property
def blocking_wait_error_msg(self):
return "Caught collective operation timeout"
def _run_all_reduce(self, pg):
pg.allreduce(torch.rand(10).cuda(self.rank))
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d_common runs with async error handling by default, but this
# tests behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get("NCCL_ASYNC_ERROR_HANDLING", None)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
# This allreduce does not block the Python thread, as allreduce only
# enqueues the CUDA operation and wait only blocks the current CUDA
# stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# Operation would time out in blocking mode.
work.wait()
# Run some GPU operations to make sure cuda has not gotten stuck.
# It was observed cuda could get stuck if NCCL communicators were
# not properly aborted before throwing RuntimeError.
a = torch.rand(10).cuda(self.rank)
elif self.rank == 1:
# Clean up structures (e.g. files for FileStore) before going down
del process_group
func()
else:
# Wait for timeout
time.sleep(2 * self.op_timeout_sec)
# Now verify communicators on this rank have been aborted by the watchdog thread.
self._wait_for_comm_abort(process_group)
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_clean_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(0))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_nonzero_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(1))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
@unittest.skip("Frequently times out see https://github.com/pytorch/pytorch/issues/58920")
def test_nccl_errors_blocking_abort(self):
self._test_nccl_errors_blocking(lambda: os.abort())
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigkill(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigterm(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.barrier().wait()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# This should timeout
process_group.barrier().wait()
def _run_invalid_nccl_blocking_wait_env(self, val):
os.environ["NCCL_BLOCKING_WAIT"] = val
store = c10d.FileStore(self.file_name, self.world_size)
with self.assertRaises(RuntimeError):
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_invalid_nccl_blocking_wait_env(self):
self._run_invalid_nccl_blocking_wait_env("abc")
self._run_invalid_nccl_blocking_wait_env("-1")
self._run_invalid_nccl_blocking_wait_env("2147483647")
self._run_invalid_nccl_blocking_wait_env("4294967295")
def _wait_for_comm_abort(self, process_group):
"""
Waits for the watchdog thread to abort communicators for the process group.
"""
while True:
try:
process_group.allreduce(torch.rand(10).cuda(self.rank))
except Exception as e:
if "NCCL communicator was aborted" in str(e):
return
else:
raise e
time.sleep(1)
@with_nccl_blocking_wait
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_nccl_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
# Initialize process_group.
timeout = 1
process_group = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=timeout)
)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
if self.rank == 0:
# This should timeout in about 1 second.
start = time.time()
# Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
else:
# Sleep to ensure timeout.
time.sleep(2 * timeout)
self._wait_for_comm_abort(process_group)
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
if sys.platform == "win32":
self._spawn_processes()
else:
self._fork_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
# The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = list(tensor.clone() for tensor in target)
else:
tensors = list(torch.zeros_like(tensor) for tensor in target)
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_default_pg_nccl(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_default_pg(backend="nccl")
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_sequence_num_incremented_nccl_default(self):
self._test_sequence_num_incremented_default_group("nccl")
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sequence_num_incremented_nccl_subgroup(self):
if self.world_size < 4:
return unittest.skip("Test requires world_size of at least 4")
self._test_sequence_num_incremented_subgroup("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_nccl_new_group(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_new_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_high_priority_stream(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
pg_opts.is_high_priority_stream = True
store = c10d.FileStore(self.file_name, self.world_size)
# Test init_process_group accepts options
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts
)
# Test with new_group
pg = c10d.new_group([0, 1], pg_options=pg_opts)
# test if the process group is constructed with a high-priority stream
self.assertTrue(pg.options.is_high_priority_stream)
# test the process group works as expected
t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)
pg.allreduce(t).wait()
expected_tensor = torch.tensor([3] * 10).cuda(self.rank)
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
c10d.all_reduce(t)
expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)
self.assertEqual(expected_tensor, t)
# Test with new_group
pg = c10d.new_group([0, 1])
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([0])
if self.rank == 0:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([1])
if self.rank == 1:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group_non_member(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 1:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
c10d.barrier(device_ids=[self.rank])
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids_function_argument(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "Invalid function argument"):
c10d.barrier(device_ids=self.rank)
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
bmv2.py
|
# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import socket
import subprocess
import threading
import time
from contextlib import closing
BMV2_TARGET_EXE = 'simple_switch_grpc'
BMV2_RUNTIME_FILE_PATH_PREFIX = '/tmp/bmv2-ptf'
SWITCH_START_TIMEOUT = 5
logger = logging.getLogger("BMv2 switch")
def get_stratum_root():
if 'STRATUM_ROOT' in os.environ:
return os.environ['STRATUM_ROOT']
else:
raise Exception("STRATUM_ROOT env not defined")
def get_stratum_ld_path():
if 'BMV2_INSTALL' in os.environ:
return 'LD_LIBRARY_PATH=%s/lib' % os.environ['BMV2_INSTALL']
else:
return ''
STRATUM_BMV2 = 'stratum_bmv2'
STRATUM_BINARY = '/bazel-bin/stratum/hal/bin/bmv2/' + STRATUM_BMV2
STRATUM_LD_PATH = get_stratum_ld_path()
INITIAL_PIPELINE = '/stratum/hal/bin/bmv2/dummy.json'
def check_bmv2_target(target):
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call([target, '--version'],
stdout=devnull, stderr=devnull)
return True
except subprocess.CalledProcessError:
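# The executable ran but "--version" exited non-zero; it exists, so report success.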
return True
except OSError: # Target executable not found
return False
def watchdog(sw):
while True:
if sw.bmv2popen is None:
return
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
if s.connect_ex(('127.0.0.1', sw.grpc_port)) == 0:
time.sleep(1)
else:
logger.error("%s process terminated!" % BMV2_TARGET_EXE)
return
class Bmv2Switch:
def __init__(self, device_id, port_map_path, grpc_port, cpu_port,
loglevel='warn', is_stratum=False, debugger=False):
self.device_id = device_id
self.port_map_path = port_map_path
self.grpc_port = int(grpc_port)
self.cpu_port = cpu_port
self.loglevel = loglevel
self.logfile = '%s.log' % BMV2_RUNTIME_FILE_PATH_PREFIX
self.logfd = None
self.bmv2popen = None
self.is_stratum = is_stratum
self.debugger = debugger
if not check_bmv2_target(BMV2_TARGET_EXE):
raise Exception("%s executable not found" % BMV2_TARGET_EXE)
def get_stratum_cmd(self, port_map):
stratumRoot = get_stratum_root()
args = [
stratumRoot + STRATUM_BINARY,
'--device_id=%s' % str(self.device_id),
'--forwarding_pipeline_configs_file=/dev/null',
'--persistent_config_dir=/dev/null',
'--initial_pipeline=' + stratumRoot + INITIAL_PIPELINE,
'--cpu_port=%s' % self.cpu_port,
'--external-hercules-urls=0.0.0.0:%s' % self.grpc_port,
]
for port, intf in port_map.items():
args.append('%d@%s' % (port, intf))
cmdString = " ".join(args)
return cmdString
def get_cmd(self, port_map):
bmv2_args = ['--device-id %s' % str(self.device_id)]
for p4_port, intf_name in port_map.items():
bmv2_args.append('-i %d@%s' % (p4_port, intf_name))
if self.debugger:
dbgaddr = 'ipc://%s-debug.ipc' % BMV2_RUNTIME_FILE_PATH_PREFIX
bmv2_args.append('--debugger-addr %s' % dbgaddr)
bmv2_args.append('--log-console')
bmv2_args.append('-L%s' % self.loglevel)
bmv2_args.append('--no-p4')
# gRPC target-specific options
bmv2_args.append('--')
bmv2_args.append('--cpu-port %s' % self.cpu_port)
bmv2_args.append('--grpc-server-addr 0.0.0.0:%s' % str(self.grpc_port))
cmdString = " ".join([BMV2_TARGET_EXE] + bmv2_args)
return cmdString
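    # For reference, the command assembled above looks roughly like this
    # (illustrative device id, interfaces, CPU port and gRPC port):
    #   simple_switch_grpc --device-id 1 -i 0@veth0 -i 1@veth2 --log-console \
    #       -Lwarn --no-p4 -- --cpu-port 255 --grpc-server-addr 0.0.0.0:28000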
def start(self):
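        # The port map file is expected to be a JSON list of entries such as
        # (illustrative): [{"p4_port": 0, "iface_name": "veth0"},
        #                  {"p4_port": 1, "iface_name": "veth2"}]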
port_map = {}
with open(self.port_map_path, 'r') as port_map_f:
port_list = json.load(port_map_f)
for entry in port_list:
p4_port = entry["p4_port"]
iface_name = entry["iface_name"]
port_map[p4_port] = iface_name
if self.is_stratum is True:
cmdString = self.get_stratum_cmd(port_map)
exeName = STRATUM_BMV2
ld_path = STRATUM_LD_PATH + " "
else:
exeName = BMV2_TARGET_EXE
cmdString = self.get_cmd(port_map)
ld_path = ""
logger.info("\nStarting %s... %s\n" % (exeName, cmdString))
# Start the switch
try:
self.logfd = open(self.logfile, "w")
self.bmv2popen = subprocess.Popen(ld_path + "exec " + cmdString,
stdout=self.logfd,
stderr=self.logfd,
shell=True)
self.wait_bmv2_start()
# We want to be notified if process crashes...
threading.Thread(target=watchdog, args=[self]).start()
except:
self.kill()
raise
def wait_bmv2_start(self):
# Wait for switch to open gRPC port, before sending ONOS the netcfg.
# Include time-out just in case something hangs.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
endtime = time.time() + SWITCH_START_TIMEOUT
while True:
result = sock.connect_ex(('127.0.0.1', self.grpc_port))
if result == 0: # Port is open. All good.
sock.close()
break
if endtime > time.time(): # Wait...
time.sleep(0.2)
else: # Time's up.
raise Exception("Switch did not start before timeout")
def kill(self):
logger.info("Killing...")
if self.bmv2popen is not None:
self.bmv2popen.kill()
self.bmv2popen = None
if self.logfd is not None:
self.logfd.close()
self.logfd = None
|
via_app_data.py
|
"""Bootstrap"""
from __future__ import absolute_import, unicode_literals
import logging
import sys
import traceback
from contextlib import contextmanager
from subprocess import CalledProcessError
from threading import Lock, Thread
from virtualenv.info import fs_supports_symlink
from virtualenv.seed.embed.base_embed import BaseEmbed
from virtualenv.seed.wheels import get_wheel
from virtualenv.util.path import Path
from .pip_install.copy import CopyPipInstall
from .pip_install.symlink import SymlinkPipInstall
class FromAppData(BaseEmbed):
def __init__(self, options):
super(FromAppData, self).__init__(options)
self.symlinks = options.symlink_app_data
@classmethod
def add_parser_arguments(cls, parser, interpreter, app_data):
super(FromAppData, cls).add_parser_arguments(parser, interpreter, app_data)
can_symlink = app_data.transient is False and fs_supports_symlink()
parser.add_argument(
"--symlink-app-data",
dest="symlink_app_data",
action="store_true" if can_symlink else "store_false",
help="{} symlink the python packages from the app-data folder (requires seed pip>=19.3)".format(
"" if can_symlink else "not supported - ",
),
default=False,
)
def run(self, creator):
if not self.enabled:
return
with self._get_seed_wheels(creator) as name_to_whl:
pip_version = name_to_whl["pip"].version_tuple if "pip" in name_to_whl else None
installer_class = self.installer_class(pip_version)
exceptions = {}
def _install(name, wheel):
try:
logging.debug("install %s from wheel %s via %s", name, wheel, installer_class.__name__)
key = Path(installer_class.__name__) / wheel.path.stem
wheel_img = self.app_data.wheel_image(creator.interpreter.version_release_str, key)
installer = installer_class(wheel.path, creator, wheel_img)
parent = self.app_data.lock / wheel_img.parent
with parent.non_reentrant_lock_for_key(wheel_img.name):
if not installer.has_image():
installer.build_image()
installer.install(creator.interpreter.version_info)
except Exception: # noqa
exceptions[name] = sys.exc_info()
threads = list(Thread(target=_install, args=(n, w)) for n, w in name_to_whl.items())
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if exceptions:
messages = ["failed to build image {} because:".format(", ".join(exceptions.keys()))]
for value in exceptions.values():
exc_type, exc_value, exc_traceback = value
messages.append("".join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
raise RuntimeError("\n".join(messages))
@contextmanager
def _get_seed_wheels(self, creator):
name_to_whl, lock, fail = {}, Lock(), {}
def _get(distribution, version):
for_py_version = creator.interpreter.version_release_str
failure, result = None, None
# fallback to download in case the exact version is not available
for download in [True] if self.download else [False, True]:
failure = None
try:
result = get_wheel(
distribution=distribution,
version=version,
for_py_version=for_py_version,
search_dirs=self.extra_search_dir,
download=download,
app_data=self.app_data,
do_periodic_update=self.periodic_update,
env=self.env,
)
if result is not None:
break
except Exception as exception: # noqa
logging.exception("fail")
failure = exception
if failure:
if isinstance(failure, CalledProcessError):
msg = "failed to download {}".format(distribution)
if version is not None:
msg += " version {}".format(version)
msg += ", pip download exit code {}".format(failure.returncode)
output = failure.output if sys.version_info < (3, 5) else (failure.output + failure.stderr)
if output:
msg += "\n"
msg += output
else:
msg = repr(failure)
logging.error(msg)
with lock:
fail[distribution] = version
else:
with lock:
name_to_whl[distribution] = result
threads = list(
Thread(target=_get, args=(distribution, version))
for distribution, version in self.distribution_to_versions().items()
)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if fail:
raise RuntimeError("seed failed due to failing to download wheels {}".format(", ".join(fail.keys())))
yield name_to_whl
def installer_class(self, pip_version_tuple):
if self.symlinks and pip_version_tuple:
# symlink support requires pip 19.3+
if pip_version_tuple >= (19, 3):
return SymlinkPipInstall
return CopyPipInstall
def __unicode__(self):
base = super(FromAppData, self).__unicode__()
msg = ", via={}, app_data_dir={}".format("symlink" if self.symlinks else "copy", self.app_data)
return base[:-1] + msg + base[-1]
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
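# Sanity check on the gain above (derived from the stated formula, not used elsewhere):
# solving k = (dt/tau)/(dt/tau + 1) = 0.091 gives dt/tau ~= 0.1, i.e. an update
# period of roughly 0.5 s for the stated 5 s time constant.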
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
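# Consistency note: 1 W of idle draw for 30 h is 30 Wh = 30e6 uWh, which matches
# the capacity budget above.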
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
MIN_ON_TIME_S = 3600
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of peripheralState voltage
self.car_voltage_instant_mV = 12e3 # Last value of peripheralState voltage
self.integration_lock = threading.Lock()
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, peripheralState, ignition):
try:
now = sec_since_boot()
# If peripheralState is None, we're probably not in a car, so we don't care
if peripheralState is None or peripheralState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = peripheralState.voltage
self.car_voltage_mV = ((peripheralState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if ignition:
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
# if integration_time_h < 0:
# raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
# if power_used < 0:
# raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, ignition, in_car, offroad_timestamp, dp_auto_shutdown, dp_auto_shutdown_in):
if offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
if dp_auto_shutdown:
disable_charging |= (now - offroad_timestamp) > dp_auto_shutdown_in * 60
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= not ignition
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= in_car
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, peripheralState, ignition, in_car, offroad_timestamp, started_seen, dp_auto_shutdown, dp_auto_shutdown_in):
if offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(ignition, in_car, offroad_timestamp, dp_auto_shutdown, dp_auto_shutdown_in))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
|
concurrent.py
|
#!/usr/bin/env python3
import argparse
from urllib.request import urlopen
import yaml
import statistics
import os
import sys
import time
import re
from subprocess import Popen, PIPE
import traceback
from threading import Thread,Lock
import random
import bench_base
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
timesToRun = 4
def runConcurrentExperiment(name, data_dir):
global timesToRun
print("Running concurrent experiment for '" + name + "'.")
allConcurrentQueries = list(range(1,9))
cores = 143
memoryPerWorker = "20g"
tps = 1500000
def isDataFinished(concurrentQueries,d):
if not d or not isinstance(d,list) or len(d)<concurrentQueries:
return False
for thread in d:
if len(thread) < timesToRun: return False
return True
def run(concurrentQueries):
g_lock = Lock()
def threadEntry(threadNum):
def isFinished():
with g_lock:
for n in results:
if len(n) < timesToRun: return False
return True
try:
while not isFinished():
print(str(threadNum) + ": Calling query.")
result = bench_base.runQuery(name, "2014-01-01", "2014-01-07", True)
queryExecutionTime = result[2]['TimeMillis']-result[0]['TimeMillis']
print(str(threadNum) + ": Query execution time: " +
str(queryExecutionTime))
with g_lock:
results[threadNum].append(queryExecutionTime)
except:
print("Error occurred in thread.")
traceback.print_exc()
results = [[] for x in range(0,concurrentQueries)]
threads = [
Thread(target=threadEntry, args=(i,)) for i in range(0,concurrentQueries)
]
[t.start() for t in threads]
[t.join() for t in threads]
return results
outFilePath = data_dir + "/concurrent/" + name + ".yaml"
if os.path.isfile(outFilePath):
with open(outFilePath, "r") as f: data = yaml.load(f)
else: data = {}
for concurrentQueries in allConcurrentQueries:
if concurrentQueries in data and \
isDataFinished(concurrentQueries,data[concurrentQueries]):
print(" Already profiled for " + str(concurrentQueries) +
" concurrent queries, skipping.")
continue
else:
data[concurrentQueries] = {}
while not isDataFinished(concurrentQueries,data[concurrentQueries]):
try:
bench_base.restartServers()
bench_base.restartSparkContext(memoryPerWorker, cores)
# For cache.
bench_base.runQuery(name, "2014-01-01", "2014-01-07", True, tps)
data[concurrentQueries] = run(concurrentQueries)
with open(outFilePath, "w") as f:
f.write(yaml.dump(data, indent=2, default_flow_style=False))
except KeyboardInterrupt: sys.exit(-1)
except Exception:
print("Exception occurred, retrying.")
traceback.print_exc()
data[concurrentQueries] = {}
pass
return data
def getStats(data):
global timesToRun
x = []; y = []; err = []
sortedKeys = sorted(data)
minX = sortedKeys[0]; maxX = sortedKeys[-1]
minY = data[minX][0][0]/1000; maxY = minY
for concurrentQueries in sortedKeys:
x.append(concurrentQueries)
allTimes = []
for thread in data[concurrentQueries]:
allTimes += thread[0:timesToRun]
allTimes = [x/1000 for x in allTimes]
m_data = statistics.mean(allTimes)
if m_data < minY: minY = m_data
if m_data > maxY: maxY = m_data
y.append(m_data)
err.append(statistics.stdev(allTimes))
return (x,y,err,minX,maxX,minY,maxY)
def plotConcurrent(query, data, data_dir):
fig = plt.figure()
ax = plt.subplot(111)
plt.title(query)
plt.xlabel("Concurrent Queries")
plt.ylabel("Execution Time (s)")
(x, y, err, minX, maxX, minY, maxY) = getStats(data)
plt.errorbar(x, y, yerr=err, marker='.', color="black", ecolor="gray")
plt.axis([minX-1, maxX+1, 0, 1.02*(maxY+max(err))])
leg = ax.legend(
["Caching. 1.5M target partition size. 6 workers."],
fancybox=True
)
leg.get_frame().set_alpha(0.5)
# plt.grid()
plt.savefig(data_dir + "/concurrent/pdf/" + query + ".pdf")
plt.savefig(data_dir + "/concurrent/png/" + query + ".png")
plt.clf()
# Print stats.
def two(s): return "{:.2f}".format(s)
print(" & ".join([query, two(y[0]), two(y[1]/y[0]), two(y[7]/y[0])]) + r" \\")
parser = argparse.ArgumentParser()
parser.add_argument("--collect-data", dest="collect", action="store_true")
parser.add_argument("--create-plots", dest="plot", action="store_true")
parser.add_argument("--data-dir", dest="data_dir", type=str, default=".")
args = parser.parse_args()
queries = [
"Pageviews",
"Revenue",
"RevenueFromTopReferringDomains",
"RevenueFromTopReferringDomainsFirstVisitGoogle",
"TopPages",
"TopPagesByBrowser",
"TopPagesByPreviousTopPages",
"TopReferringDomains",
]
if args.collect:
if not os.path.isdir(args.data_dir + "/concurrent"):
os.makedirs(args.data_dir + "/concurrent")
for query in queries:
runConcurrentExperiment(query, args.data_dir)
if args.plot:
print(" & ".join(
["Query","Serial Time (ms)","2 Concurrent Slowdown","8 Concurrent Slowdown"]
) + r" \\ \hline")
if not os.path.isdir(args.data_dir + "/concurrent/pdf"):
os.makedirs(args.data_dir + "/concurrent/pdf")
if not os.path.isdir(args.data_dir + "/concurrent/png"):
os.makedirs(args.data_dir + "/concurrent/png")
for query in queries:
with open(args.data_dir + "/concurrent/" + query + ".yaml", "r") as f:
data = yaml.load(f)
plotConcurrent(query, data, args.data_dir)
|
GetMem.py
|
# coding:utf8
'''
Created on 2016-08-30
@author: zhangq
'''
from serial import Serial
import re
from threading import Thread
import time
import datetime
import pygal
import os
class FilterMem(object):
def __init__(self, port, baudrate):
self.serial_obj = Serial()
self.serial_obj.port = port-1
self.serial_obj.baudrate = baudrate
self.connect_uart()
def connect_uart(self):
try:
self.serial_obj.open()
except Exception, e:
if self.serial_obj.isOpen():
self.serial_obj.close()
print e
return 0
def send_thread(self, _command, _period):
self.sent_thread = Thread(target=self.sendfunc,args=(_command, _period))
self.sent_thread.setDaemon(True)
self.sent_thread.start()
#self.getmem()
def getmem(self, keyword, file_name):
today = datetime.date.today()
self.file_name = r"%s_%s" % (file_name, today)
x_list = []
y_list = []
with open("%s.log"%self.file_name, "w") as f:
while 1:
self.info = self.serial_obj.readline()
print self.info
current = datetime.datetime.now()
f_time = "%s-%s-%s %s:%s:%s" % (current.year, current.month, current.day, current.hour, current.minute, current.second)
f.write("%s:%s" % (f_time, self.info))
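                # The regex below expects console lines shaped roughly like
                # (illustrative): "Used mem: 123456 bytes" -- i.e. the keyword,
                # then the first integer that appears before the word "bytes".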
match_info = re.search("%s.+?(\d+).+bytes" % keyword, self.info)
if match_info:
mem_val = match_info.group(1)
y_list.append(int(mem_val))
x_list.append(current)
print mem_val
if len(y_list)%10 == 0:
self.make_pic(x_list, y_list)
#print match_info.group(0)
#print "bbb"
#time.sleep(1)
def sendfunc(self, _char, _period):
self.serial_obj.write("mon\n")
while 1:
self.serial_obj.write("%s\n" % _char)
time.sleep(_period)
#print _char
# plot a sine wave from 0 to 4pi
def make_pic(self, x_list, y_list):
line_chart = pygal.Line()
line_chart.title = 'Mem usage evolution (in %)'
line_chart.x_labels = x_list
line_chart.add('Mem', y_list)
line_chart.render()
f = open('%s.html' % self.file_name, 'w')
f.write(line_chart.render())
f.close()
if __name__ == "__main__":
my_obj = FilterMem(9, 115200)
my_obj.send_thread("mid", 10)
#my_obj.connect_uart()
    my_obj.getmem("Used", "mem_usage")  # getmem() requires a file name prefix; "mem_usage" is a placeholder
# my_obj.sent_thread.join()
|
GravarEventosMouseTeclado.py
|
#!/usr/bin/python3
import os
import sys
import time
import json
import requests
from tkinter import *
from PIL import ImageTk, Image
from .database.database import *
from .database.datalocal import *
from datetime import date, datetime
from threading import Thread
class GravarEventosMouseTeclado:
def __init__(self):
self.db = DataBase()
self.db_base = DataLocal()
self.dir_path = "C:\\ConectaIT\\modules"
self.ler_dados = open(self.dir_path + "\\logs\\OnMouseEvents.json")
self.ler_dados2 = open(self.dir_path + "\\logs\\OnKeyboardEvent.json")
self.qntd_dados = self.ler_dados.readlines()
self.qntd_dados2 = self.ler_dados2.readlines()
try:
self.file_lines = self.qntd_dados[len(self.qntd_dados) - 1]
self.file_lines2 = self.qntd_dados2[len(self.qntd_dados2) - 1]
self.json_decode = json.loads(self.file_lines)
self.json_decode2 = json.loads(self.file_lines2)
self.data_time = datetime.strptime(
self.json_decode['time'], '%d/%m/%Y %H:%M').strftime('%d%m%Y%H%M')
self.data_time2 = datetime.strptime(
self.json_decode2['time'], '%d/%m/%Y %H:%M').strftime('%d%m%Y%H%M')
self.data_time_atual = datetime.now()
self.data_time_atual = self.data_time_atual.strftime('%d%m%Y%H%M')
if(self.data_time and self.data_time2):
if(self.data_time == self.data_time_atual):
self.dados = self.db_base.dados()
self.idUser = str(self.dados['employee']['id'])
self.inf = self.db.update_status(self.idUser)
#print(self.inf)
except:
try:
self.file_lines = self.qntd_dados[len(self.qntd_dados) - 1]
self.json_decode = json.loads(self.file_lines)
self.data_time = datetime.strptime(
self.json_decode['time'], '%d/%m/%Y %H:%M').strftime('%d%m%Y%H%M')
self.data_time_atual = datetime.now()
self.data_time_atual = self.data_time_atual.strftime(
'%d%m%Y%H%M')
if(self.data_time):
if(self.data_time == self.data_time_atual):
self.dados = self.db_base.dados()
self.idUser = str(self.dados['employee']['id'])
self.inf = self.db.update_status(self.idUser)
#print(self.inf)
except:
try:
self.file_lines2 = self.qntd_dados2[len(
self.qntd_dados2) - 1]
self.json_decode2 = json.loads(self.file_lines2)
self.data_time2 = datetime.strptime(
self.json_decode2['time'], '%d/%m/%Y %H:%M').strftime('%d%m%Y%H%M')
self.data_time_atual = datetime.now()
self.data_time_atual = self.data_time_atual.strftime(
'%d%m%Y%H%M')
if(self.data_time2):
if(self.data_time2 == self.data_time_atual):
self.dados = self.db_base.dados()
self.idUser = str(self.dados['employee']['id'])
self.inf = self.db.update_status(self.idUser)
except:
pass
class Init_GravarEventosMouseTeclado:
def __init__(self):
self.start = Thread(target=self.iniciar)
self.start.start()
def iniciar(self):
while True:
GravarEventosMouseTeclado()
time.sleep(30)
return True
def stop(self):
self.start.join(1)
return True
|
test_connection_pool.py
|
import os
import pytest
import re
import redis
import time
from unittest import mock
from threading import Thread
from redis.connection import ssl_available, to_bool
from .conftest import skip_if_server_version_lt, _get_client, REDIS_6_VERSION
from .test_pubsub import wait_for_message
class DummyConnection:
description_format = "DummyConnection<>"
def __init__(self, **kwargs):
self.kwargs = kwargs
self.pid = os.getpid()
def connect(self):
pass
def can_read(self):
return False
class TestConnectionPool:
def get_pool(self, connection_kwargs=None, max_connections=None,
connection_class=redis.Connection):
connection_kwargs = connection_kwargs or {}
pool = redis.ConnectionPool(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs)
return pool
def test_connection_creation(self):
connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=DummyConnection)
connection = pool.get_connection('_')
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self, master_host):
connection_kwargs = {'host': master_host}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection('_')
c2 = pool.get_connection('_')
assert c1 != c2
def test_max_connections(self, master_host):
connection_kwargs = {'host': master_host}
pool = self.get_pool(max_connections=2,
connection_kwargs=connection_kwargs)
pool.get_connection('_')
pool.get_connection('_')
with pytest.raises(redis.ConnectionError):
pool.get_connection('_')
def test_reuse_previously_released_connection(self, master_host):
connection_kwargs = {'host': master_host}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection('_')
pool.release(c1)
c2 = pool.get_connection('_')
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
connection_kwargs = {
'host': 'localhost',
'port': 6379,
'db': 1,
'client_name': 'test-client'
}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.Connection)
expected = ('ConnectionPool<Connection<'
'host=localhost,port=6379,db=1,client_name=test-client>>')
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
connection_kwargs = {
'path': '/abc',
'db': 1,
'client_name': 'test-client'
}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.UnixDomainSocketConnection)
expected = ('ConnectionPool<UnixDomainSocketConnection<'
'path=/abc,db=1,client_name=test-client>>')
assert repr(pool) == expected
class TestBlockingConnectionPool:
def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
connection_kwargs = connection_kwargs or {}
pool = redis.BlockingConnectionPool(connection_class=DummyConnection,
max_connections=max_connections,
timeout=timeout,
**connection_kwargs)
return pool
def test_connection_creation(self, master_host):
connection_kwargs = {'foo': 'bar', 'biz': 'baz', 'host': master_host}
pool = self.get_pool(connection_kwargs=connection_kwargs)
connection = pool.get_connection('_')
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self, master_host):
connection_kwargs = {'host': master_host}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection('_')
c2 = pool.get_connection('_')
assert c1 != c2
def test_connection_pool_blocks_until_timeout(self, master_host):
"When out of connections, block for timeout seconds, then raise"
connection_kwargs = {'host': master_host}
pool = self.get_pool(max_connections=1, timeout=0.1,
connection_kwargs=connection_kwargs)
pool.get_connection('_')
start = time.time()
with pytest.raises(redis.ConnectionError):
pool.get_connection('_')
# we should have waited at least 0.1 seconds
assert time.time() - start >= 0.1
def test_connection_pool_blocks_until_conn_available(self, master_host):
"""
When out of connections, block until another connection is released
to the pool
"""
connection_kwargs = {'host': master_host}
pool = self.get_pool(max_connections=1, timeout=2,
connection_kwargs=connection_kwargs)
c1 = pool.get_connection('_')
def target():
time.sleep(0.1)
pool.release(c1)
start = time.time()
Thread(target=target).start()
pool.get_connection('_')
assert time.time() - start >= 0.1
def test_reuse_previously_released_connection(self, master_host):
connection_kwargs = {'host': master_host}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection('_')
pool.release(c1)
c2 = pool.get_connection('_')
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
pool = redis.ConnectionPool(
host='localhost',
port=6379,
client_name='test-client'
)
expected = ('ConnectionPool<Connection<'
'host=localhost,port=6379,db=0,client_name=test-client>>')
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
pool = redis.ConnectionPool(
connection_class=redis.UnixDomainSocketConnection,
path='abc',
client_name='test-client'
)
expected = ('ConnectionPool<UnixDomainSocketConnection<'
'path=abc,db=0,client_name=test-client>>')
assert repr(pool) == expected
class TestConnectionPoolURLParsing:
def test_hostname(self):
pool = redis.ConnectionPool.from_url('redis://my.host')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'my.host',
}
def test_quoted_hostname(self):
pool = redis.ConnectionPool.from_url('redis://my %2F host %2B%3D+')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'my / host +=+',
}
def test_port(self):
pool = redis.ConnectionPool.from_url('redis://localhost:6380')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6380,
}
@skip_if_server_version_lt(REDIS_6_VERSION)
def test_username(self):
pool = redis.ConnectionPool.from_url('redis://myuser:@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'username': 'myuser',
}
@skip_if_server_version_lt(REDIS_6_VERSION)
def test_quoted_username(self):
pool = redis.ConnectionPool.from_url(
'redis://%2Fmyuser%2F%2B name%3D%24+:@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'username': '/myuser/+ name=$+',
}
def test_password(self):
pool = redis.ConnectionPool.from_url('redis://:mypassword@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'password': 'mypassword',
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
'redis://:%2Fmypass%2F%2B word%3D%24+@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'password': '/mypass/+ word=$+',
}
@skip_if_server_version_lt(REDIS_6_VERSION)
def test_username_and_password(self):
pool = redis.ConnectionPool.from_url('redis://myuser:mypass@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'username': 'myuser',
'password': 'mypass',
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url('redis://localhost', db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'db': 1,
}
def test_db_in_path(self):
pool = redis.ConnectionPool.from_url('redis://localhost/2', db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'db': 2,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url('redis://localhost/2?db=3',
db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'db': 3,
}
def test_extra_typed_querystring_options(self):
pool = redis.ConnectionPool.from_url(
'redis://localhost/2?socket_timeout=20&socket_connect_timeout=10'
'&socket_keepalive=&retry_on_timeout=Yes&max_connections=10'
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'db': 2,
'socket_timeout': 20.0,
'socket_connect_timeout': 10.0,
'retry_on_timeout': True,
}
assert pool.max_connections == 10
def test_boolean_parsing(self):
for expected, value in (
(None, None),
(None, ''),
(False, 0), (False, '0'),
(False, 'f'), (False, 'F'), (False, 'False'),
(False, 'n'), (False, 'N'), (False, 'No'),
(True, 1), (True, '1'),
(True, 'y'), (True, 'Y'), (True, 'Yes'),
):
assert expected is to_bool(value)
def test_client_name_in_querystring(self):
pool = redis.ConnectionPool.from_url(
'redis://location?client_name=test-client'
)
assert pool.connection_kwargs['client_name'] == 'test-client'
def test_invalid_extra_typed_querystring_options(self):
with pytest.raises(ValueError):
redis.ConnectionPool.from_url(
'redis://localhost/2?socket_timeout=_&'
'socket_connect_timeout=abc'
)
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'a': '1',
'b': '2'
}
def test_calling_from_subclass_returns_correct_instance(self):
pool = redis.BlockingConnectionPool.from_url('redis://localhost')
assert isinstance(pool, redis.BlockingConnectionPool)
def test_client_creates_connection_pool(self):
r = redis.Redis.from_url('redis://myhost')
assert r.connection_pool.connection_class == redis.Connection
assert r.connection_pool.connection_kwargs == {
'host': 'myhost',
}
def test_invalid_scheme_raises_error(self):
with pytest.raises(ValueError) as cm:
redis.ConnectionPool.from_url('localhost')
assert str(cm.value) == (
'Redis URL must specify one of the following schemes '
'(redis://, rediss://, unix://)'
)
class TestConnectionPoolUnixSocketURLParsing:
def test_defaults(self):
pool = redis.ConnectionPool.from_url('unix:///socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
}
@skip_if_server_version_lt(REDIS_6_VERSION)
def test_username(self):
pool = redis.ConnectionPool.from_url('unix://myuser:@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'username': 'myuser',
}
@skip_if_server_version_lt(REDIS_6_VERSION)
def test_quoted_username(self):
pool = redis.ConnectionPool.from_url(
'unix://%2Fmyuser%2F%2B name%3D%24+:@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'username': '/myuser/+ name=$+',
}
def test_password(self):
pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'password': 'mypassword',
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
'unix://:%2Fmypass%2F%2B word%3D%24+@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'password': '/mypass/+ word=$+',
}
def test_quoted_path(self):
pool = redis.ConnectionPool.from_url(
'unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/my/path/to/../+_+=$ocket',
'password': 'mypassword',
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url('unix:///socket', db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 1,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 2,
}
def test_client_name_in_querystring(self):
pool = redis.ConnectionPool.from_url(
'redis://location?client_name=test-client'
)
assert pool.connection_kwargs['client_name'] == 'test-client'
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'a': '1',
'b': '2'
}
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
class TestSSLConnectionURLParsing:
def test_host(self):
pool = redis.ConnectionPool.from_url('rediss://my.host')
assert pool.connection_class == redis.SSLConnection
assert pool.connection_kwargs == {
'host': 'my.host',
}
def test_cert_reqs_options(self):
import ssl
class DummyConnectionPool(redis.ConnectionPool):
def get_connection(self, *args, **kwargs):
return self.make_connection()
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=none')
assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=optional')
assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=required')
assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED
pool = DummyConnectionPool.from_url(
'rediss://?ssl_check_hostname=False')
assert pool.get_connection('_').check_hostname is False
pool = DummyConnectionPool.from_url(
'rediss://?ssl_check_hostname=True')
assert pool.get_connection('_').check_hostname is True
class TestConnection:
def test_on_connect_error(self):
"""
An error in Connection.on_connect should disconnect from the server
see for details: https://github.com/andymccurdy/redis-py/issues/368
"""
# this assumes the Redis server being tested against doesn't have
# 9999 databases ;)
bad_connection = redis.Redis(db=9999)
# an error should be raised on connect
with pytest.raises(redis.RedisError):
bad_connection.info()
pool = bad_connection.connection_pool
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_disconnects_socket(self, r):
"""
If Redis raises a LOADING error, the connection should be
disconnected and a BusyLoadingError raised
"""
with pytest.raises(redis.BusyLoadingError):
r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
assert not r.connection._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline_immediate_command(self, r):
"""
BusyLoadingErrors should raise from Pipelines that execute a
command immediately, like WATCH does.
"""
pipe = r.pipeline()
with pytest.raises(redis.BusyLoadingError):
pipe.immediate_execute_command('DEBUG', 'ERROR',
'LOADING fake message')
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline(self, r):
"""
BusyLoadingErrors should be raised from a pipeline execution
regardless of the raise_on_error flag.
"""
pipe = r.pipeline()
pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
with pytest.raises(redis.BusyLoadingError):
pipe.execute()
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_read_only_error(self, r):
"READONLY errors get turned in ReadOnlyError exceptions"
with pytest.raises(redis.ReadOnlyError):
r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah')
def test_connect_from_url_tcp(self):
connection = redis.Redis.from_url('redis://localhost')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'Connection',
'host=localhost,port=6379,db=0',
)
def test_connect_from_url_unix(self):
connection = redis.Redis.from_url('unix:///path/to/socket')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'UnixDomainSocketConnection',
'path=/path/to/socket,db=0',
)
def test_connect_no_auth_supplied_when_required(self, r):
"""
AuthenticationError should be raised when the server requires a
password but one isn't supplied.
"""
with pytest.raises(redis.AuthenticationError):
r.execute_command('DEBUG', 'ERROR',
'ERR Client sent AUTH, but no password is set')
def test_connect_invalid_password_supplied(self, r):
"AuthenticationError should be raised when sending the wrong password"
with pytest.raises(redis.AuthenticationError):
r.execute_command('DEBUG', 'ERROR', 'ERR invalid password')
class TestMultiConnectionClient:
@pytest.fixture()
def r(self, request):
return _get_client(redis.Redis,
request,
single_connection_client=False)
def test_multi_connection_command(self, r):
assert not r.connection
assert r.set('a', '123')
assert r.get('a') == b'123'
class TestHealthCheck:
interval = 60
@pytest.fixture()
def r(self, request):
return _get_client(redis.Redis, request,
health_check_interval=self.interval)
def assert_interval_advanced(self, connection):
diff = connection.next_health_check - time.time()
assert self.interval > diff > (self.interval - 1)
def test_health_check_runs(self, r):
r.connection.next_health_check = time.time() - 1
r.connection.check_health()
self.assert_interval_advanced(r.connection)
def test_arbitrary_command_invokes_health_check(self, r):
# invoke a command to make sure the connection is entirely setup
r.get('foo')
r.connection.next_health_check = time.time()
with mock.patch.object(r.connection, 'send_command',
wraps=r.connection.send_command) as m:
r.get('foo')
m.assert_called_with('PING', check_health=False)
self.assert_interval_advanced(r.connection)
def test_arbitrary_command_advances_next_health_check(self, r):
r.get('foo')
next_health_check = r.connection.next_health_check
r.get('foo')
assert next_health_check < r.connection.next_health_check
def test_health_check_not_invoked_within_interval(self, r):
r.get('foo')
with mock.patch.object(r.connection, 'send_command',
wraps=r.connection.send_command) as m:
r.get('foo')
ping_call_spec = (('PING',), {'check_health': False})
assert ping_call_spec not in m.call_args_list
def test_health_check_in_pipeline(self, r):
with r.pipeline(transaction=False) as pipe:
pipe.connection = pipe.connection_pool.get_connection('_')
pipe.connection.next_health_check = 0
with mock.patch.object(pipe.connection, 'send_command',
wraps=pipe.connection.send_command) as m:
responses = pipe.set('foo', 'bar').get('foo').execute()
m.assert_any_call('PING', check_health=False)
assert responses == [True, b'bar']
def test_health_check_in_transaction(self, r):
with r.pipeline(transaction=True) as pipe:
pipe.connection = pipe.connection_pool.get_connection('_')
pipe.connection.next_health_check = 0
with mock.patch.object(pipe.connection, 'send_command',
wraps=pipe.connection.send_command) as m:
responses = pipe.set('foo', 'bar').get('foo').execute()
m.assert_any_call('PING', check_health=False)
assert responses == [True, b'bar']
def test_health_check_in_watched_pipeline(self, r):
r.set('foo', 'bar')
with r.pipeline(transaction=False) as pipe:
pipe.connection = pipe.connection_pool.get_connection('_')
pipe.connection.next_health_check = 0
with mock.patch.object(pipe.connection, 'send_command',
wraps=pipe.connection.send_command) as m:
pipe.watch('foo')
# the health check should be called when watching
m.assert_called_with('PING', check_health=False)
self.assert_interval_advanced(pipe.connection)
assert pipe.get('foo') == b'bar'
# reset the mock to clear the call list and schedule another
# health check
m.reset_mock()
pipe.connection.next_health_check = 0
pipe.multi()
responses = pipe.set('foo', 'not-bar').get('foo').execute()
assert responses == [True, b'not-bar']
m.assert_any_call('PING', check_health=False)
def test_health_check_in_pubsub_before_subscribe(self, r):
"A health check happens before the first [p]subscribe"
p = r.pubsub()
p.connection = p.connection_pool.get_connection('_')
p.connection.next_health_check = 0
with mock.patch.object(p.connection, 'send_command',
wraps=p.connection.send_command) as m:
assert not p.subscribed
p.subscribe('foo')
# the connection is not yet in pubsub mode, so the normal
# ping/pong within connection.send_command should check
# the health of the connection
m.assert_any_call('PING', check_health=False)
self.assert_interval_advanced(p.connection)
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
def test_health_check_in_pubsub_after_subscribed(self, r):
"""
Pubsub can handle a new subscribe when it's time to check the
connection health
"""
p = r.pubsub()
p.connection = p.connection_pool.get_connection('_')
p.connection.next_health_check = 0
with mock.patch.object(p.connection, 'send_command',
wraps=p.connection.send_command) as m:
p.subscribe('foo')
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
self.assert_interval_advanced(p.connection)
# because we weren't subscribed when sending the subscribe
# message to 'foo', the connection's standard check_health ran
# prior to subscribing.
m.assert_any_call('PING', check_health=False)
p.connection.next_health_check = 0
m.reset_mock()
p.subscribe('bar')
            # the second subscribe issues exactly one command (the subscribe)
# and the health check is not invoked
m.assert_called_once_with('SUBSCRIBE', 'bar', check_health=False)
# since no message has been read since the health check was
# reset, it should still be 0
assert p.connection.next_health_check == 0
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
assert wait_for_message(p) is None
# now that the connection is subscribed, the pubsub health
# check should have taken over and include the HEALTH_CHECK_MESSAGE
m.assert_any_call('PING', p.HEALTH_CHECK_MESSAGE,
check_health=False)
self.assert_interval_advanced(p.connection)
def test_health_check_in_pubsub_poll(self, r):
"""
Polling a pubsub connection that's subscribed will regularly
check the connection's health.
"""
p = r.pubsub()
p.connection = p.connection_pool.get_connection('_')
with mock.patch.object(p.connection, 'send_command',
wraps=p.connection.send_command) as m:
p.subscribe('foo')
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
self.assert_interval_advanced(p.connection)
# polling the connection before the health check interval
# doesn't result in another health check
m.reset_mock()
next_health_check = p.connection.next_health_check
assert wait_for_message(p) is None
assert p.connection.next_health_check == next_health_check
m.assert_not_called()
# reset the health check and poll again
# we should not receive a pong message, but the next_health_check
# should be advanced
p.connection.next_health_check = 0
assert wait_for_message(p) is None
m.assert_called_with('PING', p.HEALTH_CHECK_MESSAGE,
check_health=False)
self.assert_interval_advanced(p.connection)
|
run_reducer_admin.py
|
import asyncio
import os
import threading
from gevent.pywsgi import WSGIServer
from src.logic.reducer import Reducer
from src.admin.app import app
from src import utils
from src import secret
reducer_started_event = threading.Event()
def reducer_thread(loop: asyncio.AbstractEventLoop, reducer: Reducer) -> None:
async def task() -> None:
await reducer._before_run()
reducer_started_event.set()
await reducer._mainloop()
reducer.log('critical', 'run_reducer_admin.reducer_thread', 'reducer mainloop stopped')
t = task()
loop.create_task(t)
loop.run_forever()
if __name__=='__main__':
utils.fix_zmq_asyncio_windows()
l = asyncio.new_event_loop()
r = Reducer(f'reducer-{os.getpid()}')
threading.Thread(target=reducer_thread, args=(l, r), daemon=True).start()
reducer_started_event.wait()
app.config['reducer_loop'] = l
app.config['reducer_obj'] = r
WSGIServer(secret.REDUCER_ADMIN_SERVER_ADDR, app, log=None).serve_forever()
|
test_queue.py
|
# Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
import Queue
import time
import unittest
from test import test_support
threading = test_support.import_module('threading')
QUEUE_SIZE = 5
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, and one such failure remained rare and
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
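# A minimal usage sketch (mirroring calls made in the tests below): to verify
# that q.get() blocks on an empty queue until another thread put()s an item,
#   self.do_blocking_test(q.get, (), q.put, ('item',))
# runs q.get() in the main thread and q.put('item') in the trigger thread.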
class BlockingTestMixin:
def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
self.result = block_func(*block_args)
# If block_func returned before our thread made the call, we failed!
if not self.t.startedEvent.is_set():
self.fail("blocking function '%r' appeared not to block" %
block_func)
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
return self.result
# Call this instead if block_func is supposed to raise an exception.
def do_exceptional_blocking_test(self,block_func, block_args, trigger_func,
trigger_args, expected_exception_class):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
try:
try:
block_func(*block_args)
except expected_exception_class:
raise
else:
self.fail("expected exception of kind %r" %
expected_exception_class)
finally:
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
if not self.t.startedEvent.is_set():
self.fail("trigger thread ended but event never set")
class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def simple_queue_test(self, q):
if not q.empty():
raise RuntimeError, "Call this function with an empty queue"
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
target_order = dict(Queue = [111, 333, 222],
LifoQueue = [222, 333, 111],
PriorityQueue = [111, 222, 333])
actual_order = [q.get(), q.get(), q.get()]
self.assertEquals(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertTrue(not q.empty(), "Queue should not be empty")
self.assertTrue(not q.full(), "Queue should not be full")
last = 2 * QUEUE_SIZE
full = 3 * 2 * QUEUE_SIZE
q.put(last)
self.assertTrue(q.full(), "Queue should be full")
try:
q.put(full, block=0)
self.fail("Didn't appear to block with a full queue")
except Queue.Full:
pass
try:
q.put(full, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except Queue.Full:
pass
# Test a blocking put
self.do_blocking_test(q.put, (full,), q.get, ())
self.do_blocking_test(q.put, (full, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except Queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except Queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x is None:
q.task_done()
return
with self.cumlock:
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
for i in (0,1):
threading.Thread(target=self.worker, args=(q,)).start()
for i in xrange(100):
q.put(i)
q.join()
self.assertEquals(self.cum, sum(range(100)),
"q.join() did not block until all tasks were done")
for i in (0,1):
q.put(None) # instruct the threads to close
q.join() # verify that you can join twice
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = self.type2test()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_simple_queue(self):
# Do it a couple of times on the same queue.
# Done twice to make sure works with same instance reused.
q = self.type2test(QUEUE_SIZE)
self.simple_queue_test(q)
self.simple_queue_test(q)
class QueueTest(BaseQueueTest):
type2test = Queue.Queue
class LifoQueueTest(BaseQueueTest):
type2test = Queue.LifoQueue
class PriorityQueueTest(BaseQueueTest):
type2test = Queue.PriorityQueue
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
pass
class FailingQueue(Queue.Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
Queue.Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
raise FailingQueueException, "You Lose"
return Queue.Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
raise FailingQueueException, "You Lose"
return Queue.Queue._get(self)
class FailingQueueTest(unittest.TestCase, BlockingTestMixin):
def failing_queue_test(self, q):
if not q.empty():
raise RuntimeError, "Call this function with an empty queue"
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
try:
q.put("oops", block=0)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put("oops", timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put("last")
self.assertTrue(q.full(), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
try:
self.do_blocking_test(q.put, ("full",), q.get, ())
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
# Test a failing timeout put
q.fail_next_put = True
try:
self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
self.assertTrue(q.full(), "Queue should be full")
q.get()
self.assertTrue(not q.full(), "Queue should not be full")
q.put("last")
self.assertTrue(q.full(), "Queue should be full")
# Test a blocking put
self.do_blocking_test(q.put, ("full",), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
try:
q.get()
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(not q.empty(), "Queue should not be empty")
q.fail_next_get = True
try:
q.get(timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(not q.empty(), "Queue should not be empty")
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
q.fail_next_get = True
try:
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# put succeeded, but get failed.
self.assertTrue(not q.empty(), "Queue should not be empty")
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
def test_failing_queue(self):
# Test to make sure a queue is functioning correctly.
# Done twice to the same instance.
q = FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
def test_main():
test_support.run_unittest(QueueTest, LifoQueueTest, PriorityQueueTest,
FailingQueueTest)
if __name__ == "__main__":
test_main()
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends: - CherryPy Python module
:optdepends: - ws4py Python module for websockets support.
:configuration: All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system which requires additional configuration not described
here.
Example production-ready configuration; add to the Salt master config file:
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
Using only a secure HTTPS connection is strongly recommended since Salt
authentication credentials will be sent over the wire.
A self-signed certificate can be generated using the
:py:func:`~salt.modules.tls.create_self_signed_cert` function in Salt (note
the dependencies for this module).
.. code-block:: bash
salt-call tls.create_self_signed_cert
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
ssl_crt
    The path to an SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
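
For reference, an expanded configuration that combines several of the options
above might look like the following (all paths and values are illustrative):

.. code-block:: yaml

    rest_cherrypy:
      port: 8000
      host: 0.0.0.0
      ssl_crt: /etc/pki/tls/certs/localhost.crt
      ssl_key: /etc/pki/tls/certs/localhost.key
      thread_pool: 100
      collect_stats: True
      static: /srv/salt-api/static
      static_path: /static
      app: /srv/salt-api/index.html
      app_path: /app
      webhook_url: /hook
      webhook_disable_auth: False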
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
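
For example, a typical exchange first requests a token from the
:py:class:`Login` URL and then sends that token with each subsequent request.
The credentials, token value, and use of ``-k`` (to accept a self-signed
certificate) below are illustrative:

.. code-block:: bash

    # Obtain a session token
    curl -sSk https://localhost:8000/login \\
        -d username='saltuser' -d password='saltpass' -d eauth='pam'

    # Reuse the token via the X-Auth-Token header
    curl -sSk https://localhost:8000 \\
        -H 'X-Auth-Token: 6d1b722e' \\
        -d client='local' -d tgt='*' -d fun='test.ping'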
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request.
.. glossary::
lowstate
A dictionary containing various keys that instruct Salt which command
to run, where that command lives, any parameters for that command, any
authentication credentials, what returner to use, etc.
Salt uses the lowstate data format internally in many places to pass
command data between functions. Salt also uses lowstate for the
:ref:`LocalClient() <python-api>` Python API interface.
The following example (in JSON format) causes Salt to execute two commands::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
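
The same list can also be sent with :command:`curl` by declaring the request
body format via the :mailheader:`Content-Type` header (the token value is
illustrative):

.. code-block:: bash

    curl -sSk https://localhost:8000 \\
        -H 'X-Auth-Token: 6d1b722e' \\
        -H 'Content-Type: application/json' \\
        -d '[{"client": "local", "tgt": "*", "fun": "test.fib", "arg": ["10"]}]'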
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
import collections
import itertools
import functools
import logging
import json
import StringIO
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
import cherrypy
from cherrypy.lib import cpstats
import yaml
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
cherrypy.response.status = 403
return {
'status': cherrypy.response.status,
'return': "Bad IP",
}
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
    if 'token' not in cherrypy.session:
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except salt.exceptions.EauthAuthenticationError:
raise cherrypy.HTTPError(401)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
return out(ret)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if not isinstance(data, list):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
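# Register the functions above as CherryPy tools. Within a single hook point,
# tools with a lower ``priority`` value run earlier (CherryPy's default is 50),
# so in ``before_handler`` cors_tool (30) runs before lowdata_fmt (40), which
# runs before hypermedia_out and salt_ip_verify (both default 50).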
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_handler',
cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
        # If the lowstate loaded isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
# Grab all available client interfaces
clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient,
predicate=inspect.ismethod) if not name.startswith('__')]
clients.remove('run') # run method calls client interfaces
return {
'return': "Welcome",
'clients': clients,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -si https://localhost:8000 \\
-H "Accept: application/x-yaml" \\
-H "X-Auth-Token: d40d1e1e" \\
-d client=local \\
-d tgt='*' \\
-d fun='test.ping' \\
-d arg
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Length: 36
Content-Type: application/x-www-form-urlencoded
fun=test.ping&arg&client=local&tgt=*
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-H "Accept: application/x-yaml" \\
-d tgt='*' \\
-d fun='status.diskusage'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 26
Content-Type: application/x-www-form-urlencoded
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
'jid': jid,
}]
if jid:
lowstate.append({
'client': 'runner',
'fun': 'jobs.list_job',
'jid': jid,
})
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
job_ret, job_info = job_ret_info
ret['info'] = [job_info]
else:
job_ret = job_ret_info[0]
ret['return'] = [job_ret]
return ret
class Keys(LowDataAdapter):
def GET(self, mid=None):
'''
A convenience URL for showing the list of minion keys or detail on a
specific key
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
self._cp_config['tools.salt_token.on'] = True
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
def POST(self, mid, keysize=None, force=None, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sS http://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS http://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
self._cp_config['tools.hypermedia_out.on'] = False
self._cp_config['tools.sessions.on'] = False
lowstate = [{
'client': 'wheel',
'fun': 'key.gen_accept',
'id_': mid,
}]
if keysize:
lowstate[0]['keysize'] = keysize
if force:
lowstate[0]['force'] = force
lowstate[0].update(kwargs)
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = StringIO.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, StringIO.StringIO(pub_key))
tarball.addfile(priv_key_file, StringIO.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(mid)
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
perms = eauth.get(token['name'], eauth.get('*'))
if perms is None:
raise ValueError("Eauth permission list not found.")
except (AttributeError, IndexError, KeyError, ValueError):
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
raise cherrypy.HTTPError(500,
'Configuration for external_auth could not be read.')
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms,
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Class to run commands without normal session handling
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
.. http:post:: /run
This entry point is primarily for "one-off" commands. Each request
must pass full Salt authentication credentials. Otherwise this URL
is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
:term:`lowstate` data describing Salt commands must be sent in the
request body.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='local' \\
-d tgt='*' \\
-d fun='test.ping' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='pam'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
            // Note, you must be authenticated!
var source = new EventSource('/events');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) { console.debug(e.data) };
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events', {withCredentials: true});
        Some browser clients lack CORS support for the ``EventSource()`` API. Such
        clients may instead pass the :mailheader:`X-Auth-Token` value as a URL
        parameter:
.. code-block:: bash
curl -NsS localhost:8000/events/6d1b722e
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
# Pulling the session token from an URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
            orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
yield u'retry: {0}\n'.format(400)
while True:
data = stream.next()
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:**
curl -NsS \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: http://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: http://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
            var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
        The examples above show how to establish a websocket connection to Salt
        and activate real-time updates from Salt's event stream by signaling
        ``websocket client ready``.
'''
# Pulling the session token from an URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
            orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
data = stream.next()
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
    This URL requires authentication by default; however, not all external
    services can be configured to authenticate. For this reason, authentication
    can be selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- 'curl -sS http://saltapi-url.example.com:8000/hook/travis/build/success -d branch="${TRAVIS_BRANCH}" -d commit="${TRAVIS_COMMIT}"'
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/x-www-form-urlencoded
foo=Foo&bar=Bar!
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``http://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
                      revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
raw_body = cherrypy.serving.request.raw_body
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
return cherrypy.lib.static.serve_file(apiopts['app'])
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in self.url_map.items():
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
if 'app' in self.apiopts:
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.cors_tool.on': True,
},
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
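# A minimal sketch of serving the returned objects directly with CherryPy
# (illustrative only -- the real salt-api daemon performs its own wiring, and
# ``master_opts`` below stands in for a loaded Salt master configuration):
#
#     root, apiopts, cpyopts = get_app(master_opts)
#     cherrypy.quickstart(root, apiopts.get('root_prefix', '/'), cpyopts)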
|
runAll.py
|
#!/usr/bin/python
import sys, getopt
import os
import threading
import ntpath
import ghProc
CPP_PATH = 'projects' + os.sep + 'top_C++'
C_PATH = 'projects' + os.sep + 'top_C'
def processProject(projPath):
"""thread worker function"""
print('processProject : %s\n' % projPath)
#print threading.current_thread().name
ghProc.processLog(projPath)
return
#threads = []
def processProjects(projList):
for pj in projList:
t = threading.Thread(target=processProject, name=ntpath.basename(pj), args=(pj,))
#threads.append(t)
t.start()
def findAll(root):
proj_list = []
for item in os.listdir(root):
if os.path.isdir(os.path.join(root, item)):
proj_list.append(item)
return proj_list
def main(argv):
inputfile = ''
outputfile = ''
c_projects = []
cpp_projects = []
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print('runAll.py -i <inputfile> -o <outputfile>')
sys.exit(2)
if len(opts) == 0:
#no argument is passed
print('runAll.py -i <inputfile> -o <outputfile>')
sys.exit()
for opt, arg in opts:
if opt == '-h':
print('runAll.py -i <inputfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
print('Input file is :', inputfile)
print('Output file is :', outputfile)
#populate arrays with c and c++ projects
#cpp_projects = findAll(CPP_PATH)
#c_projects = findAll(C_PATH)
#print cpp_projects
#print c_projects
project_paths = []
f = open(inputfile, 'r')
orig_stdout = sys.stdout
orig_stderr = sys.stderr
for line in f:
project_path = line.strip()
if project_path.startswith('#'):
continue
# if project_name in cpp_projects:
# project_path = os.path.join(CPP_PATH, project_name)
# elif project_name in c_projects:
# project_path = os.path.join(C_PATH, project_name)
# else:
# project_path = ''
sys.stdout = orig_stdout
sys.stderr = orig_stderr
print(project_path)
#project_paths.append(project_path)
        project_name = ntpath.basename(project_path)
        sys.stdout = open(project_name + '.out', 'w')
        sys.stderr = open(project_name + '.err', 'w')
        print(project_path)
        ghProc.processLog(project_path)
        # Close the per-project log files so handles are not leaked across iterations
        sys.stdout.close()
        sys.stderr.close()
    f.close()
    # Restore the original streams once all projects have been processed
    sys.stdout = orig_stdout
    sys.stderr = orig_stderr
#processProjects(project_paths)
if __name__ == "__main__":
main(sys.argv[1:])
|
email.py
|
from threading import Thread
from flask import current_app
from flask_mail import Message
from app import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject=subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(current_app._get_current_object(), msg)).start()
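# Example usage (illustrative; assumes a configured Flask-Mail setup and that
# ``render_template`` and ``user`` exist in the calling context):
#
#     send_email('[MyApp] Password Reset',
#                sender='noreply@example.com',
#                recipients=[user.email],
#                text_body=render_template('email/reset_password.txt', user=user),
#                html_body=render_template('email/reset_password.html', user=user))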
|
lisp-itr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-itr.py
#
# This file performs LISP Ingress Tunnel Router (ITR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import select
import threading
import pcappy
import time
import os
import commands
import struct
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
II1iII1i = [ None , None , None ]
oO0oIIII = None
Oo0oO0oo0oO00 = None
i111I = None
II1Ii1iI1i = None
iiI1iIiI = lisp . lisp_get_ephemeral_port ( )
OOo = lisp . lisp_get_ephemeral_port ( )
Ii1IIii11 = None
Oooo0000 = None
i11 = None
I11 = None
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
oo0O000OoO = False
if 34 - 34: I11i * I1IiiI
if 31 - 31: II111iiii + OoO0O00 . I1Ii111
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
i1iIIi1 = threading . Lock ( )
if 50 - 50: i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
def IIiiIiI1 ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "ITR" , [ ] ) )
if 41 - 41: OoOoOO00
if 13 - 13: Oo0Ooo . i11iIiiIii - iIii1I11I1II1 - OoOoOO00
if 6 - 6: I1IiiI / Oo0Ooo % Ii1I
if 84 - 84: i11iIiiIii . o0oOOo0O0Ooo
if 100 - 100: Ii1I - Ii1I - I1Ii111
if 20 - 20: OoooooooOO
if 13 - 13: i1IIi - Ii1I % oO0o / iIii1I11I1II1 % iII111i
def oo ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "ITR" ) )
if 68 - 68: I11i + OOooOOo . iIii1I11I1II1 - IiII % iIii1I11I1II1 - ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
def I1i1iii ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_rloc_probe_command ( "ITR" ) )
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
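# Periodic (60-second) housekeeping timer: flush crypto keys learned by nonce,
# send an L2-overlay Map-Request for the broadcast MAC EID when configured,
# age out map-cache entries, then reschedule itself.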
def oo0 ( lisp_sockets , lisp_ephem_port ) :
lisp . lisp_set_exception ( )
if 57 - 57: OOooOOo . OOooOOo
if 95 - 95: O0 + OoO0O00 . II111iiii / O0
if 97 - 97: ooOoO0o - OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - OoooooooOO
if 59 - 59: O0 + I1IiiI + IiII % I1IiiI
for o0OOoo0OO0OOO in lisp . lisp_crypto_keys_by_nonce . values ( ) :
for iI1iI1I1i1I in o0OOoo0OO0OOO : del ( iI1iI1I1i1I )
if 24 - 24: I1ii11iIi11i
lisp . lisp_crypto_keys_by_nonce = { }
if 56 - 56: ooOoO0o
if 92 - 92: iII111i . I11i + o0oOOo0O0Ooo
if 28 - 28: i1IIi * Oo0Ooo - o0oOOo0O0Ooo * IiII * Ii1I / OoO0O00
if 94 - 94: II111iiii % I1ii11iIi11i / OoOoOO00 * iIii1I11I1II1
if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
if ( lisp . lisp_l2_overlay ) :
O0o0 = lisp . LISP_AFI_MAC
OO00Oo = lisp . lisp_default_iid
O0OOO0OOoO0O = lisp . lisp_address ( O0o0 , "0000-0000-0000" , 0 , OO00Oo )
O0OOO0OOoO0O . mask_len = 0
O00Oo000ooO0 = lisp . lisp_address ( O0o0 , "ffff-ffff-ffff" , 48 , OO00Oo )
lisp . lisp_send_map_request ( lisp_sockets , lisp_ephem_port , O0OOO0OOoO0O , O00Oo000ooO0 , None )
if 100 - 100: O0 + IiII - OOooOOo + i11iIiiIii * Ii1I
if 30 - 30: o0oOOo0O0Ooo . Ii1I - OoooooooOO
if 8 - 8: i1IIi - iIii1I11I1II1 * II111iiii + i11iIiiIii / I1Ii111 % OOooOOo
if 16 - 16: I1ii11iIi11i + OoO0O00 - II111iiii
if 85 - 85: OoOoOO00 + i1IIi
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
if 58 - 58: II111iiii * OOooOOo * I1ii11iIi11i / OOooOOo
if 75 - 75: oO0o
if 50 - 50: Ii1I / Oo0Ooo - oO0o - I11i % iII111i - oO0o
if 91 - 91: OoO0O00 / I11i - II111iiii . I11i
i11 = threading . Timer ( 60 , oo0 ,
[ lisp_sockets , lisp_ephem_port ] )
i11 . start ( )
return
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
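# Periodic dynamic-EID scan: for each database-mapping with dynamic-EIDs,
# remove entries idle past their timeout (unless the hardware still reports
# them active), notify the lisp-etr process via IPC, and reschedule.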
def OO0oOoOO0oOO0 ( lisp_socket ) :
lisp . lisp_set_exception ( )
if 86 - 86: OOooOOo
OOoo0O = lisp . lisp_get_timestamp ( )
for Oo0ooOo0o in lisp . lisp_db_list :
if ( Oo0ooOo0o . dynamic_eid_configured ( ) == False ) : continue
if 22 - 22: iIii1I11I1II1 / i11iIiiIii * iIii1I11I1II1 * II111iiii . OOooOOo / i11iIiiIii
Iiii = [ ]
for OO0OoO0o00 in Oo0ooOo0o . dynamic_eids . values ( ) :
ooOO0O0ooOooO = OO0OoO0o00 . last_packet
if ( ooOO0O0ooOooO == None ) : continue
if ( ooOO0O0ooOooO + OO0OoO0o00 . timeout > OOoo0O ) : continue
if 55 - 55: o0oOOo0O0Ooo * OoOoOO00
if 61 - 61: I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
if ( lisp . lisp_program_hardware ) :
IIIIiiII111 = OO0OoO0o00 . dynamic_eid . print_prefix_no_iid ( )
if ( lisp . lisp_arista_is_alive ( IIIIiiII111 ) ) :
lisp . lprint ( ( "Hardware indicates dynamic-EID {} " + "still active" ) . format ( lisp . green ( IIIIiiII111 , False ) ) )
if 97 - 97: I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
continue
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
oOOo0oOo0 = OO0OoO0o00 . dynamic_eid . print_address ( )
II = "learn%{}%None" . format ( oOOo0oOo0 )
II = lisp . lisp_command_ipc ( II , "lisp-itr" )
lisp . lisp_ipc ( II , lisp_socket , "lisp-etr" )
if 60 - 60: I1IiiI
lisp . lprint ( "Dynamic-EID {}" . format ( lisp . bold ( lisp . green ( oOOo0oOo0 , False ) + " activity timeout" ,
False ) ) )
Iiii . append ( oOOo0oOo0 )
if 1 - 1: Oo0Ooo / o0oOOo0O0Ooo % iII111i * IiII . i11iIiiIii
if 2 - 2: I1ii11iIi11i * I11i - iIii1I11I1II1 + I1IiiI . oO0o % iII111i
if 92 - 92: iII111i
if 25 - 25: Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
for oOOo0oOo0 in Iiii : Oo0ooOo0o . dynamic_eids . pop ( oOOo0oOo0 )
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
OO0oOoOO0oOO0 , [ lisp_socket ] ) . start ( )
return
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
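# Return the list of local interface names: hardwired on macOS, otherwise
# parsed from 'ifconfig' output.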
def oOooOo0 ( ) :
if ( lisp . lisp_is_macos ( ) ) : return ( [ "en0" , "en1" , "lo0" ] )
if 38 - 38: I1Ii111
if 84 - 84: iIii1I11I1II1 % iII111i / iIii1I11I1II1 % I11i
if 45 - 45: O0
if 26 - 26: I11i - iIii1I11I1II1 - I1IiiI / OoO0O00 . OoOoOO00 % iIii1I11I1II1
OO = "Link encap"
iIiIIi1 = commands . getoutput ( "ifconfig | egrep '{}'" . format ( OO ) )
if ( iIiIIi1 == "" ) :
OO = ": flags="
iIiIIi1 = commands . getoutput ( "ifconfig | egrep '{}'" . format ( OO ) )
if 7 - 7: ooOoO0o - Oo0Ooo - oO0o + ooOoO0o
if 26 - 26: Ii1I
iIiIIi1 = iIiIIi1 . split ( "\n" )
if 35 - 35: Ii1I - I1IiiI % o0oOOo0O0Ooo . OoooooooOO % Ii1I
I1i1Iiiii = [ ]
for OOo0oO00ooO00 in iIiIIi1 :
oOO0O00oO0Ooo = OOo0oO00ooO00 . split ( OO ) [ 0 ] . replace ( " " , "" )
I1i1Iiiii . append ( oOO0O00oO0Ooo )
if 67 - 67: OoO0O00 - OOooOOo
return ( I1i1Iiiii )
if 36 - 36: IiII
if 36 - 36: ooOoO0o / O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
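# ITR startup: open the control/IPC/ephemeral sockets and the raw send
# sockets, start the packet-capture setup thread, load the map-cache
# checkpoint, and arm the periodic timers. Returns False if the local
# addresses cannot be determined.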
def I111I1Iiii1i ( ) :
global II1iII1i
global oO0oIIII
global Oo0oO0oo0oO00
global i111I
global II1Ii1iI1i
global Ii1IIii11 , Oooo0000
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
lisp . lisp_i_am ( "itr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "ITR starting up" )
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
lisp . lisp_get_local_interfaces ( )
lisp . lisp_get_local_macs ( )
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
II1iII1i [ 0 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV4 )
II1iII1i [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
oO0oIIII = lisp . lisp_open_listen_socket ( "" , "lisp-itr" )
Oo0oO0oo0oO00 = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
II1iII1i [ 2 ] = oO0oIIII
OoOOoOooooOOo = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
i111I = lisp . lisp_open_listen_socket ( OoOOoOooooOOo ,
str ( iiI1iIiI ) )
if 87 - 87: I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
II1Ii1iI1i = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
str ( OOo ) )
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
Ii1IIii11 = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
Ii1IIii11 . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if ( lisp . lisp_is_raspbian ( ) == False ) :
Oooo0000 = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
socket . IPPROTO_UDP )
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
if 69 - 69: I1Ii111
if 11 - 11: I1IiiI
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
lisp . lisp_ipc_socket = oO0oIIII
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
if 71 - 71: I1Ii111 + Ii1I
if 28 - 28: OOooOOo
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
threading . Thread ( target = OoOOo0OOoO ) . start ( )
if 72 - 72: Ii1I
if 1 - 1: OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
lisp . lisp_load_checkpoint ( )
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
if 76 - 76: IiII * iII111i
if 52 - 52: OOooOOo
lisp . lisp_load_split_pings = ( os . getenv ( "LISP_LOAD_SPLIT_PINGS" ) != None )
if 19 - 19: I1IiiI
if 25 - 25: Ii1I / ooOoO0o
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
i11 = threading . Timer ( 60 , oo0 ,
[ II1iII1i , iiI1iIiI ] )
i11 . start ( )
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
OO0oOoOO0oOO0 , [ oO0oIIII ] ) . start ( )
return ( True )
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
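# Count the "prefix {" sub-clauses inside the "lisp database-mapping" stanzas
# of ./lisp.config.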
def o0ooooO0o0O ( ) :
iiIi11iI1iii = open ( "./lisp.config" , "r" )
if 67 - 67: O0 / I1Ii111
OOO0000oO = False
iI1i111I1Ii = 0
for i11i1ii1I in iiIi11iI1iii :
if ( i11i1ii1I == "lisp database-mapping {\n" ) : OOO0000oO = True
if ( i11i1ii1I == "}\n" ) : OOO0000oO = False
if ( OOO0000oO == False ) : continue
if ( i11i1ii1I [ 0 ] == " " and i11i1ii1I . find ( "prefix {" ) != - 1 ) : iI1i111I1Ii += 1
if 88 - 88: I11i % I1ii11iIi11i
iiIi11iI1iii . close ( )
return ( iI1i111I1Ii )
if 48 - 48: ooOoO0o / I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / i1IIi
if 92 - 92: Oo0Ooo % Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
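# Wait until the configured number of database-mapping prefixes has been
# processed, then return the EID-prefix list and the subset that is
# dynamic-EID configured.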
def O00oOOooo ( ) :
if 50 - 50: I1ii11iIi11i % O0 * o0oOOo0O0Ooo
if 5 - 5: IiII * OoOoOO00
if 5 - 5: I1Ii111
if 90 - 90: I1Ii111 . ooOoO0o / Ii1I - I11i
if 40 - 40: OoooooooOO
iI1i111I1Ii = o0ooooO0o0O ( )
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
I1IiIiiIiIII = os . getenv ( "LISP_ITR_WAIT_TIME" )
I1IiIiiIiIII = 1 if ( I1IiIiiIiIII == None ) else int ( I1IiIiiIiIII )
if 8 - 8: oO0o / I1ii11iIi11i
if 20 - 20: I1IiiI
if 95 - 95: iII111i - I1IiiI
if 34 - 34: ooOoO0o * I1IiiI . i1IIi * ooOoO0o / ooOoO0o
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
while ( iI1i111I1Ii != len ( lisp . lisp_db_list ) ) :
lisp . lprint ( ( "Waiting {} second(s) for {} database-mapping EID-" + "prefixes, {} processed so far ..." ) . format ( I1IiIiiIiIII , iI1i111I1Ii ,
len ( lisp . lisp_db_list ) ) )
time . sleep ( I1IiIiiIiIII )
if 70 - 70: OoO0O00 % oO0o + OOooOOo / Ii1I % O0
if 100 - 100: o0oOOo0O0Ooo + OOooOOo * o0oOOo0O0Ooo
if 80 - 80: o0oOOo0O0Ooo * O0 - Ii1I
if 66 - 66: i11iIiiIii - OOooOOo * Oo0Ooo
if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - Ii1I + I1ii11iIi11i
if 51 - 51: iIii1I11I1II1 . ooOoO0o + iIii1I11I1II1
oOoOO = [ ]
Ii1i1 = [ ]
for Oo0ooOo0o in lisp . lisp_db_list :
if ( Oo0ooOo0o . eid . is_ipv4 ( ) or Oo0ooOo0o . eid . is_ipv6 ( ) or Oo0ooOo0o . eid . is_mac ( ) ) :
oOOo0oOo0 = Oo0ooOo0o . eid . print_prefix_no_iid ( )
if ( Oo0ooOo0o . dynamic_eid_configured ( ) ) : Ii1i1 . append ( oOOo0oOo0 )
oOoOO . append ( oOOo0oOo0 )
if 65 - 65: ooOoO0o . OoooooooOO / I1ii11iIi11i . i1IIi * OoO0O00
if 19 - 19: i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
return ( oOoOO , Ii1i1 )
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
if 53 - 53: oO0o - I1IiiI - oO0o * iII111i
if 71 - 71: O0 - iIii1I11I1II1
if 12 - 12: OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
if 46 - 46: Oo0Ooo
if 1 - 1: iII111i
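# Packet-capture setup thread: compute the pcap filter (control-plane only
# when an external data plane is in use), install iptables rules on Linux,
# and start one capture thread per selected interface, plus RLOC-probe-reply
# capture on the remaining interfaces.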
def OoOOo0OOoO ( ) :
global i1iIIi1
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
lisp . lisp_set_exception ( )
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if 46 - 46: o0oOOo0O0Ooo % iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
oOoOO , Ii1i1 = O00oOOooo ( )
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
if 28 - 28: ooOoO0o + i11iIiiIii / I11i % OoOoOO00 % Oo0Ooo - O0
if 54 - 54: i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
if 5 - 5: Ii1I
if 46 - 46: IiII
ii1iIi1iIiI1i = None
if ( lisp . lisp_ipc_data_plane ) :
lisp . lprint ( lisp . bold ( "Data-plane packet capture disabled" , False ) )
ii1iIi1iIiI1i = "(udp src port 4342 and ip[28] == 0x28)" + " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
if 40 - 40: i1IIi % OOooOOo
if 71 - 71: OoOoOO00
lisp . lprint ( "Control-plane capture: '{}'" . format ( ii1iIi1iIiI1i ) )
else :
lisp . lprint ( "Capturing packets for source-EIDs {}" . format ( lisp . green ( str ( oOoOO ) , False ) ) )
if 14 - 14: i11iIiiIii % OOooOOo
if 82 - 82: iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % IiII / Ii1I . Ii1I
if ( lisp . lisp_pitr ) : lisp . lprint ( "Configured for PITR functionality" )
if 14 - 14: o0oOOo0O0Ooo . OOooOOo . I11i + OoooooooOO - OOooOOo + IiII
if 9 - 9: Ii1I
if 59 - 59: I1IiiI * II111iiii . O0
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
i1 = lisp . lisp_l2_overlay
if ( i1 == False ) :
if ( lisp . lisp_is_linux ( ) ) : OO0oOOoo ( oOoOO , Ii1i1 )
if 52 - 52: o0oOOo0O0Ooo % Oo0Ooo
if 64 - 64: O0 % I11i % O0 * OoO0O00 . oO0o + I1IiiI
if 75 - 75: I11i . OoooooooOO % o0oOOo0O0Ooo * I11i % OoooooooOO
if 13 - 13: IiII / i11iIiiIii % II111iiii % I11i . I1ii11iIi11i
if 8 - 8: OoOoOO00 + Oo0Ooo - II111iiii
if 11 - 11: i1IIi % i11iIiiIii - i1IIi * OoOoOO00
if ( ii1iIi1iIiI1i == None ) :
if ( lisp . lisp_pitr ) :
i1I11IiI1iiII = o00oOo0oOoo ( oOoOO , [ ] , False , True )
else :
i1I11IiI1iiII = o00oOo0oOoo ( oOoOO , Ii1i1 , i1 ,
False )
if 57 - 57: OoOoOO00 - I1ii11iIi11i
else :
i1I11IiI1iiII = ii1iIi1iIiI1i
if 50 - 50: I1Ii111 / i1IIi % OoO0O00 . I1IiiI / iII111i
if 88 - 88: OOooOOo . I11i * o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . I11i
if 10 - 10: o0oOOo0O0Ooo * Oo0Ooo % O0 * iIii1I11I1II1 . O0 % I1ii11iIi11i
if 44 - 44: II111iiii / iII111i / I11i % II111iiii / i1IIi . Ii1I
if 59 - 59: OoooooooOO
iIiIIi1 = oOooOo0 ( )
i1iiiii1 = os . getenv ( "LISP_PCAP_LIST" )
if ( i1iiiii1 == None ) :
O0iII1 = ""
IIII1i = [ ]
else :
Ii1IIIIi1ii1I = list ( set ( i1iiiii1 . split ( ) ) & set ( iIiIIi1 ) )
IIII1i = list ( set ( i1iiiii1 . split ( ) ) ^ set ( iIiIIi1 ) )
O0iII1 = "user-selected "
lisp . lprint ( "User pcap-list: {}, active-interfaces: {}" . format ( i1iiiii1 , iIiIIi1 ) )
if 13 - 13: I1IiiI % OoOoOO00 . I1ii11iIi11i / Oo0Ooo % OOooOOo . OoooooooOO
iIiIIi1 = Ii1IIIIi1ii1I
if 22 - 22: IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
for oo00O00oO000o in iIiIIi1 :
OOo00OoO = [ oo00O00oO000o , i1I11IiI1iiII , i1iIIi1 ]
lisp . lprint ( "Capturing packets on {}interface {}" . format ( O0iII1 , oo00O00oO000o ) )
threading . Thread ( target = iIi1 , args = OOo00OoO ) . start ( )
if 21 - 21: I11i
if ( ii1iIi1iIiI1i ) : return
if 92 - 92: i11iIiiIii / I1Ii111 - iII111i % ooOoO0o * I1Ii111 + Oo0Ooo
if 11 - 11: OoooooooOO . I1Ii111
if 80 - 80: OoooooooOO - OOooOOo * Ii1I * I1ii11iIi11i / I1IiiI / OOooOOo
if 13 - 13: I1Ii111 * ooOoO0o + i11iIiiIii * I1Ii111 - ooOoO0o
if 23 - 23: iIii1I11I1II1 * i1IIi % OoooooooOO * IiII
I1Iiiiiii = "(udp src port 4342 and ip[28] == 0x28)"
for oo00O00oO000o in IIII1i :
OOo00OoO = [ oo00O00oO000o , I1Iiiiiii , i1iIIi1 ]
lisp . lprint ( "Capture RLOC-probe replies on RLOC interface {}" . format ( oo00O00oO000o ) )
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
threading . Thread ( target = iIi1 , args = OOo00OoO ) . start ( )
if 69 - 69: O0
return
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
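# Shutdown: cancel the periodic Info-Request timer and close every socket the
# ITR opened.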
def I1II ( ) :
if 27 - 27: II111iiii / Ii1I . OOooOOo
if 9 - 9: ooOoO0o - I1ii11iIi11i - iII111i
if 82 - 82: IiII - IiII + OoOoOO00
if 8 - 8: o0oOOo0O0Ooo % iII111i * oO0o % Ii1I . ooOoO0o / ooOoO0o
if ( I11 ) : I11 . cancel ( )
if 81 - 81: OoO0O00
if 99 - 99: oO0o * II111iiii * I1Ii111
if 92 - 92: Oo0Ooo
if 40 - 40: OoOoOO00 / IiII
lisp . lisp_close_socket ( II1iII1i [ 0 ] , "" )
lisp . lisp_close_socket ( II1iII1i [ 1 ] , "" )
lisp . lisp_close_socket ( i111I , "" )
lisp . lisp_close_socket ( II1Ii1iI1i , "" )
lisp . lisp_close_socket ( oO0oIIII , "lisp-itr" )
lisp . lisp_close_socket ( Oo0oO0oo0oO00 , "lispers.net-itr" )
return
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
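# Data-plane input: hand RLOC-probe replies to the control plane, decode the
# inner packet, apply the interface instance-ID, check the source EID (and
# dynamic-EID discovery) against the database, look the destination up in the
# map-cache (sending a Map-Request on miss or TTL refresh), then encapsulate
# toward the selected RLOC or replicate along an RLE.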
def IIIii ( packet , device , input_interface , macs , my_sa ) :
global II1iII1i
global iiI1iIiI
global Ii1IIii11 , Oooo0000
global oO0oIIII
if 83 - 83: IiII % o0oOOo0O0Ooo % I1IiiI . iIii1I11I1II1 - IiII
if 88 - 88: OoooooooOO
if 84 - 84: OoOoOO00 / I11i * iII111i / oO0o - i11iIiiIii . Oo0Ooo
if 60 - 60: I1ii11iIi11i * I1IiiI
I1iIiI11I1 = packet
packet , i1oOOoo0o0OOOO , i1IiII1III , i1O00oo = lisp . lisp_is_rloc_probe ( packet , 1 )
if ( I1iIiI11I1 != packet ) :
if ( i1oOOoo0o0OOOO == None ) : return
lisp . lisp_parse_packet ( II1iII1i , packet , i1oOOoo0o0OOOO , i1IiII1III , i1O00oo )
return
if 77 - 77: iII111i % OOooOOo - I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
packet = lisp . lisp_packet ( packet )
if ( packet . decode ( False , None , None ) == None ) : return
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
if 49 - 49: oO0o . OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
if ( my_sa ) : input_interface = device
if 2 - 2: OoooooooOO % OOooOOo
if 63 - 63: I1IiiI % iIii1I11I1II1
if 39 - 39: iII111i / II111iiii / I1ii11iIi11i % I1IiiI
if 89 - 89: I1Ii111 + OoooooooOO + I1Ii111 * i1IIi + iIii1I11I1II1 % I11i
oOo0oO = packet . inner_source
OO00Oo = lisp . lisp_get_interface_instance_id ( input_interface , oOo0oO )
packet . inner_dest . instance_id = OO00Oo
packet . inner_source . instance_id = OO00Oo
if 5 - 5: OOooOOo - OOooOOo . Oo0Ooo + OoOoOO00 - OOooOOo . oO0o
if 31 - 31: II111iiii - iIii1I11I1II1 - iIii1I11I1II1 % I11i
if 12 - 12: iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo / i1IIi
if ( macs != "" ) : macs = ", MACs: " + macs + ","
packet . print_packet ( "Receive {}{}" . format ( device , macs ) , False )
if 71 - 71: OoOoOO00 . i1IIi
if 94 - 94: OOooOOo . I1Ii111
if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
if 47 - 47: OoooooooOO
if ( device != input_interface ) :
lisp . dprint ( "Not our MAC address on interface {}, pcap interface {}" . format ( input_interface , device ) )
if 4 - 4: I1IiiI % I11i
return
if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
if 82 - 82: ooOoO0o + II111iiii
II1i1i1iII1 = lisp . lisp_decent_configured
if ( II1i1i1iII1 ) :
oOo000 = packet . inner_dest . is_multicast_address ( )
IIi = packet . inner_source . is_local ( )
II1i1i1iII1 = ( IIi and oOo000 )
if 27 - 27: OOooOOo % Ii1I
if 58 - 58: OOooOOo * o0oOOo0O0Ooo + O0 % OOooOOo
if ( II1i1i1iII1 == False ) :
if 25 - 25: Oo0Ooo % I1ii11iIi11i * ooOoO0o
if 6 - 6: iII111i . IiII * OoOoOO00 . i1IIi
if 98 - 98: i1IIi
if 65 - 65: OoOoOO00 / OoO0O00 % IiII
Oo0ooOo0o = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_source , False )
if ( Oo0ooOo0o == None ) :
lisp . dprint ( "Packet received from non-EID source" )
return
if 45 - 45: OoOoOO00
if 66 - 66: OoO0O00
if 56 - 56: O0
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
if 23 - 23: oO0o - OOooOOo + I11i
if ( Oo0ooOo0o . dynamic_eid_configured ( ) ) :
II11 = lisp . lisp_allow_dynamic_eid ( input_interface ,
packet . inner_source )
if ( II11 ) :
lisp . lisp_itr_discover_eid ( Oo0ooOo0o , packet . inner_source ,
input_interface , II11 , oO0oIIII )
else :
Iiii11iIi1 = lisp . green ( packet . inner_source . print_address ( ) , False )
lisp . dprint ( "Disallow dynamic-EID {} on interface {}" . format ( Iiii11iIi1 ,
input_interface ) )
return
if 40 - 40: I11i % OoO0O00 . I1Ii111
if 84 - 84: OoOoOO00 % ooOoO0o - OoOoOO00 . o0oOOo0O0Ooo
if 5 - 5: OoOoOO00 * I1Ii111 - I1ii11iIi11i / iIii1I11I1II1 % oO0o + IiII
if ( packet . inner_source . is_local ( ) and
packet . udp_dport == lisp . LISP_CTRL_PORT ) : return
if 51 - 51: I1Ii111 * II111iiii % ooOoO0o
if 98 - 98: OoO0O00 . I11i % II111iiii
if 71 - 71: I1Ii111 % i1IIi - II111iiii - OOooOOo + OOooOOo * ooOoO0o
if 51 - 51: iIii1I11I1II1 / OoOoOO00 + OOooOOo - I11i + iII111i
if 29 - 29: o0oOOo0O0Ooo % iIii1I11I1II1 . OoooooooOO % OoooooooOO % II111iiii / iII111i
if ( packet . inner_version == 4 ) :
packet . packet = lisp . lisp_ipv4_input ( packet . packet )
if ( packet . packet == None ) : return
packet . inner_ttl -= 1
elif ( packet . inner_version == 6 ) :
packet . packet = lisp . lisp_ipv6_input ( packet )
if ( packet . packet == None ) : return
packet . inner_ttl -= 1
else :
packet . packet = lisp . lisp_mac_input ( packet . packet )
if ( packet . packet == None ) : return
packet . encap_port = lisp . LISP_L2_DATA_PORT
if 70 - 70: i11iIiiIii % iII111i
if 11 - 11: IiII % I1ii11iIi11i % Ii1I / II111iiii % I1Ii111 - Oo0Ooo
if 96 - 96: I1ii11iIi11i / II111iiii . Ii1I - iII111i * I11i * oO0o
if 76 - 76: Ii1I - II111iiii * OOooOOo / OoooooooOO
if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
if 71 - 71: OoooooooOO
if ( oo0O000OoO == False ) :
Oo0ooOo0o = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
if ( Oo0ooOo0o and Oo0ooOo0o . dynamic_eid_configured ( ) == False ) :
lisp . dprint ( ( "Packet destined to local EID-prefix {}, " + "natively forwarding" ) . format ( Oo0ooOo0o . print_eid_tuple ( ) ) )
if 33 - 33: I1Ii111
packet . send_packet ( Ii1IIii11 , packet . inner_dest )
return
if 62 - 62: I1ii11iIi11i + Ii1I + i1IIi / OoooooooOO
if 7 - 7: o0oOOo0O0Ooo + i1IIi . I1IiiI / Oo0Ooo
if 22 - 22: ooOoO0o - ooOoO0o % OOooOOo . I1Ii111 + oO0o
if 63 - 63: I1IiiI % I1Ii111 * o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % iII111i
if 45 - 45: IiII
if 20 - 20: OoooooooOO * o0oOOo0O0Ooo * O0 . OOooOOo
OoO000O = lisp . lisp_map_cache_lookup ( packet . inner_source , packet . inner_dest )
if 94 - 94: OoOoOO00 . O0 / Ii1I . I1ii11iIi11i - i1IIi
if 26 - 26: OoO0O00 - OOooOOo . o0oOOo0O0Ooo
if 65 - 65: I1ii11iIi11i % O0 % iIii1I11I1II1 * Ii1I
if 31 - 31: Ii1I
if 44 - 44: OoOoOO00 - iIii1I11I1II1 - Oo0Ooo
if 80 - 80: iIii1I11I1II1 * I1Ii111 % I11i % Oo0Ooo
if 95 - 95: iIii1I11I1II1 - I1ii11iIi11i . I1Ii111 - I1IiiI
OOOOoo = Oo0ooOo0o . secondary_iid if ( Oo0ooOo0o != None ) else None
if ( OOOOoo and OoO000O and OoO000O . action == lisp . LISP_NATIVE_FORWARD_ACTION ) :
o000 = packet . inner_dest
o000 . instance_id = OOOOoo
OoO000O = lisp . lisp_map_cache_lookup ( packet . inner_source , o000 )
if 94 - 94: o0oOOo0O0Ooo + O0 / I11i . I1IiiI + OOooOOo . iIii1I11I1II1
if 62 - 62: OoOoOO00 / I1IiiI - I1ii11iIi11i - I1IiiI + i11iIiiIii + i1IIi
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
if 8 - 8: o0oOOo0O0Ooo
if ( OoO000O == None or OoO000O . action == lisp . LISP_SEND_MAP_REQUEST_ACTION ) :
if ( lisp . lisp_rate_limit_map_request ( packet . inner_source ,
packet . inner_dest ) ) : return
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
packet . inner_source , packet . inner_dest , None )
return
if 4 - 4: I1ii11iIi11i + I1ii11iIi11i * ooOoO0o - OoOoOO00
if 78 - 78: Ii1I / II111iiii % OoOoOO00
if 52 - 52: OOooOOo - iII111i * oO0o
if 17 - 17: OoooooooOO + OOooOOo * I11i * OoOoOO00
if 36 - 36: O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if ( OoO000O and OoO000O . is_active ( ) and OoO000O . has_ttl_elapsed ( ) ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( OoO000O . print_eid_tuple ( ) , False ) ) )
if 46 - 46: ooOoO0o
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
packet . inner_source , packet . inner_dest , None )
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
if 61 - 61: OoooooooOO . oO0o . OoooooooOO / Oo0Ooo
if 72 - 72: i1IIi
if 82 - 82: OoOoOO00 + OoooooooOO / i11iIiiIii * I1ii11iIi11i . OoooooooOO
if 63 - 63: I1ii11iIi11i
OoO000O . stats . increment ( len ( packet . packet ) )
if 6 - 6: ooOoO0o / I1ii11iIi11i
if 57 - 57: I11i
if 67 - 67: OoO0O00 . ooOoO0o
if 87 - 87: oO0o % Ii1I
oo0OOOoOo , IIiiIIi1 , ooO000O , oO , III111iiIi1 = OoO000O . select_rloc ( packet , oO0oIIII )
if 29 - 29: OoooooooOO + Ii1I % iIii1I11I1II1 - OOooOOo . I1IiiI % Oo0Ooo
if 16 - 16: IiII / Oo0Ooo + OOooOOo / Ii1I
if ( oo0OOOoOo == None and III111iiIi1 == None ) :
if ( oO == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
packet . send_packet ( Ii1IIii11 , packet . inner_dest )
return
if 42 - 42: Oo0Ooo + II111iiii - I1IiiI / I11i % IiII
lisp . dprint ( "No reachable RLOCs found" )
return
if 66 - 66: OOooOOo + i1IIi . I1IiiI + OOooOOo - I11i
if ( oo0OOOoOo and oo0OOOoOo . is_null ( ) ) :
lisp . dprint ( "Drop action RLOC found" )
return
if 17 - 17: O0 . I1Ii111 . O0 + O0 / Oo0Ooo . ooOoO0o
if 62 - 62: I1ii11iIi11i % iII111i * OoO0O00 - i1IIi
if 66 - 66: i11iIiiIii / o0oOOo0O0Ooo - OoooooooOO / i1IIi . i11iIiiIii
if 16 - 16: Oo0Ooo % I1ii11iIi11i + I11i - O0 . iII111i / I1Ii111
if 35 - 35: oO0o / I1Ii111 / II111iiii - iIii1I11I1II1 + II111iiii . I1Ii111
packet . outer_tos = packet . inner_tos
packet . outer_ttl = packet . inner_ttl
if 81 - 81: iII111i * OOooOOo - I1ii11iIi11i * Ii1I % OoOoOO00 * OoOoOO00
if 59 - 59: iIii1I11I1II1
if 7 - 7: OOooOOo * I1IiiI / o0oOOo0O0Ooo * i11iIiiIii
if 84 - 84: OOooOOo . iII111i
if ( oo0OOOoOo ) :
packet . outer_dest . copy_address ( oo0OOOoOo )
II1i111 = packet . outer_dest . afi_to_version ( )
packet . outer_version = II1i111
i1iiiIii11 = lisp . lisp_myrlocs [ 0 ] if ( II1i111 == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 67 - 67: o0oOOo0O0Ooo % OoOoOO00 . OoOoOO00 - ooOoO0o
packet . outer_source . copy_address ( i1iiiIii11 )
if 90 - 90: ooOoO0o + II111iiii * I1ii11iIi11i / Ii1I . o0oOOo0O0Ooo + o0oOOo0O0Ooo
if 40 - 40: ooOoO0o / OoOoOO00 % i11iIiiIii % I1ii11iIi11i / I1IiiI
if 62 - 62: i1IIi - OoOoOO00
if 62 - 62: i1IIi + Oo0Ooo % IiII
if ( packet . encode ( ooO000O ) == None ) : return
if ( len ( packet . packet ) <= 1500 ) : packet . print_packet ( "Send" , True )
if 28 - 28: I1ii11iIi11i . i1IIi
if 10 - 10: OoO0O00 / Oo0Ooo
if 15 - 15: iII111i . OoOoOO00 / iII111i * I11i - I1IiiI % I1ii11iIi11i
if 57 - 57: O0 % OoOoOO00 % oO0o
iI1iii = Oooo0000 if II1i111 == 6 else Ii1IIii11
packet . send_packet ( iI1iii , packet . outer_dest )
if 87 - 87: I1ii11iIi11i / OoooooooOO - Oo0Ooo % OoOoOO00 % IiII % Oo0Ooo
elif ( III111iiIi1 ) :
if 29 - 29: OoooooooOO . I1IiiI % I1ii11iIi11i - iII111i
if 8 - 8: i1IIi
if 32 - 32: oO0o / II111iiii
if 45 - 45: I1ii11iIi11i + OoO0O00 * i11iIiiIii / OOooOOo % I11i * O0
if 17 - 17: O0
OOooO0o = III111iiIi1 . rle_nodes [ 0 ] . level
ii1iI1iI1 = len ( packet . packet )
for o00oOOO in III111iiIi1 . rle_forwarding_list :
if ( o00oOOO . level != OOooO0o ) : return
if 57 - 57: I1IiiI - o0oOOo0O0Ooo + OoO0O00 % Oo0Ooo
packet . outer_dest . copy_address ( o00oOOO . address )
if ( II1i1i1iII1 ) : packet . inner_dest . instance_id = 0xffffff
II1i111 = packet . outer_dest . afi_to_version ( )
packet . outer_version = II1i111
i1iiiIii11 = lisp . lisp_myrlocs [ 0 ] if ( II1i111 == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 26 - 26: iII111i . iII111i
packet . outer_source . copy_address ( i1iiiIii11 )
if 35 - 35: I1Ii111 . OoOoOO00 * i11iIiiIii
if ( packet . encode ( None ) == None ) : return
if 44 - 44: i11iIiiIii / Oo0Ooo
if 42 - 42: OoooooooOO + Oo0Ooo % II111iiii + OoO0O00
if 24 - 24: iII111i * II111iiii % iII111i % IiII + OoooooooOO
if 29 - 29: II111iiii - OoooooooOO - i11iIiiIii . o0oOOo0O0Ooo
packet . print_packet ( "Replicate-to-L{}" . format ( o00oOOO . level ) , True )
packet . send_packet ( Ii1IIii11 , packet . outer_dest )
if 19 - 19: II111iiii
if 72 - 72: OoooooooOO / I1IiiI + Ii1I / OoOoOO00 * Ii1I
if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if 32 - 32: i11iIiiIii - I1Ii111
oo00ooOoo = len ( packet . packet ) - ii1iI1iI1
packet . packet = packet . packet [ oo00ooOoo : : ]
if 28 - 28: Ii1I
if 1 - 1: Ii1I
if 48 - 48: O0 + O0 . I1Ii111 - ooOoO0o
if 63 - 63: oO0o
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if 36 - 36: IiII
del ( packet )
return
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
if 74 - 74: I1Ii111 % I1ii11iIi11i
if 7 - 7: II111iiii
if 27 - 27: oO0o . OoooooooOO + i11iIiiIii
if 86 - 86: I11i / o0oOOo0O0Ooo - o0oOOo0O0Ooo + I1ii11iIi11i + oO0o
if 33 - 33: o0oOOo0O0Ooo . iII111i . IiII . i1IIi
if 49 - 49: I1ii11iIi11i
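# pcap callback: compute the link-header length (loopback/VLAN aware),
# extract MAC information, drop ARP frames, strip the L2 header, and pass the
# IP payload to the data-plane input routine above.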
def O0oOOo0o ( device , not_used , packet ) :
if 50 - 50: iII111i . I1ii11iIi11i . OoO0O00 * I11i + II111iiii % i11iIiiIii
i1i1IiIiIi1Ii = 4 if device == "lo0" else 14
if 64 - 64: OOooOOo + OoooooooOO * OoooooooOO
if ( lisp . lisp_frame_logging ) :
i1I = lisp . bold ( "Received frame on interface '{}'" . format ( device ) ,
False )
iiI1I1IIi11i1 = lisp . lisp_format_packet ( packet [ 0 : 64 ] )
lisp . lprint ( "{}: {}" . format ( i1I , iiI1I1IIi11i1 ) )
if 45 - 45: ooOoO0o % o0oOOo0O0Ooo - ooOoO0o
if 31 - 31: IiII / i11iIiiIii
if 83 - 83: I1ii11iIi11i / I1Ii111 - i11iIiiIii . iIii1I11I1II1 + Oo0Ooo
if 59 - 59: O0 % Oo0Ooo
if 92 - 92: Ii1I % iII111i / I1ii11iIi11i % I1ii11iIi11i * I1IiiI
Oo = ""
oO00oOOo0Oo = False
OOo0oO00ooO00 = device
if ( i1i1IiIiIi1Ii == 14 ) :
iIiIIi1 , IIiIIIIii , iI , oO00oOOo0Oo = lisp . lisp_get_input_interface ( packet )
OOo0oO00ooO00 = device if ( device in iIiIIi1 ) else iIiIIi1 [ 0 ]
Oo = lisp . lisp_format_macs ( IIiIIIIii , iI )
if ( OOo0oO00ooO00 . find ( "vlan" ) != - 1 ) : i1i1IiIiIi1Ii += 4
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
if 6 - 6: IiII * i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + o0oOOo0O0Ooo / i1IIi
if 53 - 53: I11i + iIii1I11I1II1
if ( int ( iI [ 1 ] , 16 ) & 1 ) : oO00oOOo0Oo = True
if 70 - 70: I1ii11iIi11i
if 67 - 67: OoooooooOO
if 29 - 29: O0 - i11iIiiIii - II111iiii + OOooOOo * IiII
if 2 - 2: i1IIi - ooOoO0o + I1IiiI . o0oOOo0O0Ooo * o0oOOo0O0Ooo / OoOoOO00
if 93 - 93: i1IIi
ooOOOo = struct . unpack ( "H" , packet [ i1i1IiIiIi1Ii - 2 : i1i1IiIiIi1Ii ] ) [ 0 ]
ooOOOo = socket . ntohs ( ooOOOo )
if ( ooOOOo == 0x8100 ) :
OO000oOoo0O = struct . unpack ( "I" , packet [ i1i1IiIiIi1Ii : i1i1IiIiIi1Ii + 4 ] ) [ 0 ]
OO000oOoo0O = socket . ntohl ( OO000oOoo0O )
OOo0oO00ooO00 = "vlan" + str ( OO000oOoo0O >> 16 )
i1i1IiIiIi1Ii += 4
elif ( ooOOOo == 0x806 ) :
lisp . dprint ( "Dropping ARP packets, host should have default route" )
return
if 9 - 9: oO0o * i1IIi - i1IIi
if 16 - 16: I1IiiI * i1IIi - o0oOOo0O0Ooo . IiII % I11i / o0oOOo0O0Ooo
if ( lisp . lisp_l2_overlay ) : i1i1IiIiIi1Ii = 0
if 14 - 14: iIii1I11I1II1 * I1Ii111 * I1ii11iIi11i / iIii1I11I1II1 * IiII / I11i
IIIii ( packet [ i1i1IiIiIi1Ii : : ] , device , OOo0oO00ooO00 , Oo , oO00oOOo0Oo )
return
if 77 - 77: OoO0O00 + I1Ii111 + I1Ii111 * Ii1I / OoooooooOO . Ii1I
if 62 - 62: i1IIi - i1IIi
if 69 - 69: OoOoOO00 % oO0o - I11i
if 38 - 38: iIii1I11I1II1 + i11iIiiIii / i11iIiiIii % OoO0O00 / ooOoO0o % Ii1I
if 7 - 7: IiII * I1IiiI + i1IIi + i11iIiiIii + Oo0Ooo % I1IiiI
if 62 - 62: o0oOOo0O0Ooo - Ii1I * OoOoOO00 - i11iIiiIii % ooOoO0o
if 52 - 52: I1ii11iIi11i % oO0o - i11iIiiIii
if 30 - 30: iII111i / OoO0O00 + oO0o
if 6 - 6: iII111i . I11i + Ii1I . I1Ii111
if 70 - 70: OoO0O00
if 46 - 46: I11i - i1IIi
if 46 - 46: I1Ii111 % Ii1I
if 72 - 72: iIii1I11I1II1
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
if 87 - 87: OoO0O00 % I1IiiI
if 77 - 77: iIii1I11I1II1 - i1IIi . oO0o
if 26 - 26: o0oOOo0O0Ooo * IiII . i1IIi
if 59 - 59: O0 + i1IIi - o0oOOo0O0Ooo
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
if 84 - 84: i11iIiiIii * OoO0O00
if 18 - 18: OOooOOo - Ii1I - OoOoOO00 / I1Ii111 - O0
if 30 - 30: O0 + I1ii11iIi11i + II111iiii
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if 50 - 50: ooOoO0o + i1IIi
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
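# Install iptables/ip6tables raw-table rules: accept traffic to local,
# loopback, and multicast addresses and (when not a PITR) EID-to-EID traffic,
# then drop EID-sourced packets so the kernel does not also forward what the
# ITR will encapsulate. Also applies the virtio checksum workaround when
# LISP_VIRTIO_BUG is set.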
def OO0oOOoo ( sources , dyn_eids ) :
if ( os . getenv ( "LISP_NO_IPTABLES" ) != None ) :
lisp . lprint ( "User selected to suppress installing iptables rules" )
return
if 47 - 47: o0oOOo0O0Ooo
if 66 - 66: I1IiiI - IiII
os . system ( "sudo iptables -t raw -N lisp" )
os . system ( "sudo iptables -t raw -A PREROUTING -j lisp" )
os . system ( "sudo ip6tables -t raw -N lisp" )
os . system ( "sudo ip6tables -t raw -A PREROUTING -j lisp" )
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
o00Ooo0 = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}"
O0O00O = [ "127.0.0.1" , "::1" , "224.0.0.0/4 -p igmp" , "ff00::/8" ,
"fe80::/16" ]
O0O00O += sources + lisp . lisp_get_all_addresses ( )
for iIi1Ii in O0O00O :
IiI1IIIII1I = "" if iIi1Ii . find ( ":" ) == - 1 else "6"
os . system ( o00Ooo0 . format ( IiI1IIIII1I , iIi1Ii ) )
if 35 - 35: Ii1I - Ii1I + i1IIi - O0 - I1Ii111
if 58 - 58: OoOoOO00 - iII111i - OoooooooOO
if 96 - 96: iIii1I11I1II1
if 82 - 82: OoOoOO00 + O0 - IiII % oO0o * i11iIiiIii
if 15 - 15: o0oOOo0O0Ooo
if 39 - 39: OOooOOo / I1ii11iIi11i / I1IiiI * I1Ii111
if 44 - 44: O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / O0 - I11i
if 83 - 83: IiII * I11i / Oo0Ooo
if ( lisp . lisp_pitr == False ) :
o00Ooo0 = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}"
iIIIiI = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}"
for i1oOOoo0o0OOOO in sources :
if ( i1oOOoo0o0OOOO in dyn_eids ) : continue
IiI1IIIII1I = "" if i1oOOoo0o0OOOO . find ( ":" ) == - 1 else "6"
for O0OOO0OOoO0O in sources :
if ( O0OOO0OOoO0O in dyn_eids ) : continue
if ( O0OOO0OOoO0O . find ( "." ) != - 1 and i1oOOoo0o0OOOO . find ( "." ) == - 1 ) : continue
if ( O0OOO0OOoO0O . find ( ":" ) != - 1 and i1oOOoo0o0OOOO . find ( ":" ) == - 1 ) : continue
if ( commands . getoutput ( iIIIiI . format ( IiI1IIIII1I , i1oOOoo0o0OOOO , O0OOO0OOoO0O ) ) == "" ) :
continue
if 93 - 93: ooOoO0o . iIii1I11I1II1 % i11iIiiIii . OoOoOO00 % ooOoO0o + O0
os . system ( o00Ooo0 . format ( IiI1IIIII1I , i1oOOoo0o0OOOO , O0OOO0OOoO0O ) )
if 65 - 65: Ii1I + OoO0O00 - OoooooooOO
if 51 - 51: Oo0Ooo + oO0o / iII111i - i1IIi
if 51 - 51: Oo0Ooo - I1ii11iIi11i * I11i
if 12 - 12: iIii1I11I1II1 % ooOoO0o % ooOoO0o
if 78 - 78: IiII . OoOoOO00 . I11i
if 97 - 97: oO0o
if 80 - 80: I1IiiI . Ii1I
I1I11ii = "sudo ip{}tables -t raw -A lisp -j DROP -s {}"
for i1oOOoo0o0OOOO in sources :
IiI1IIIII1I = "" if i1oOOoo0o0OOOO . find ( ":" ) == - 1 else "6"
os . system ( I1I11ii . format ( IiI1IIIII1I , i1oOOoo0o0OOOO ) )
if 93 - 93: I1ii11iIi11i % OoOoOO00 . O0 / iII111i * oO0o
if 29 - 29: o0oOOo0O0Ooo
if 86 - 86: II111iiii . IiII
if 2 - 2: OoooooooOO
if 60 - 60: OoO0O00
oO00Ooo0oO = commands . getoutput ( "sudo iptables -t raw -S lisp" ) . split ( "\n" )
oO00Ooo0oO += commands . getoutput ( "sudo ip6tables -t raw -S lisp" ) . split ( "\n" )
lisp . lprint ( "Using kernel filters: {}" . format ( oO00Ooo0oO ) )
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
if 29 - 29: O0 . I1Ii111
if 66 - 66: oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - ooOoO0o - IiII
if 70 - 70: I1Ii111 + oO0o
if 93 - 93: I1Ii111 + Ii1I
if 33 - 33: O0
if ( os . getenv ( "LISP_VIRTIO_BUG" ) != None ) :
oo0oO = ( "sudo iptables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
if 50 - 50: OoooooooOO - iIii1I11I1II1 + i1IIi % I1Ii111 - iIii1I11I1II1 % O0
oo0oO += ( "sudo iptables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill; " )
if 58 - 58: IiII + iIii1I11I1II1
oo0oO += ( "sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
if 65 - 65: II111iiii - I1Ii111 % o0oOOo0O0Ooo - OoOoOO00 * iII111i + Ii1I
oo0oO += ( "sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill" )
if 79 - 79: ooOoO0o . OoOoOO00 % I1Ii111 - Oo0Ooo
os . system ( oo0oO )
o0oO0oO0O = lisp . bold ( "virtio" , False )
lisp . lprint ( "{} bug workaround, configure '{}'" . format ( o0oO0oO0O , oo0oO ) )
if 18 - 18: Oo0Ooo
return
if 20 - 20: oO0o * O0 + I11i - OoooooooOO . I11i
if 60 - 60: o0oOOo0O0Ooo . o0oOOo0O0Ooo / iII111i
if 45 - 45: O0 . i11iIiiIii % iII111i . OoOoOO00 % IiII % iIii1I11I1II1
if 58 - 58: iIii1I11I1II1 . OoOoOO00 - i11iIiiIii * iIii1I11I1II1 % i11iIiiIii / I1IiiI
if 80 - 80: I1ii11iIi11i / iIii1I11I1II1 % OoOoOO00
if 80 - 80: OoO0O00 % iII111i
if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
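# Build the pcap filter string from the configured EID prefixes, the local
# addresses to exclude, and extra clauses that also capture RLOC-probe replies
# and multicast LISP traffic.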
def o00oOo0oOoo ( sources , dyn_eids , l2_overlay , pitr ) :
if ( l2_overlay ) :
i1I11IiI1iiII = "ether[6:4] >= 0 and ether[10:2] >= 0"
lisp . lprint ( "Using pcap filter: '{}'" . format ( i1I11IiI1iiII ) )
return ( i1I11IiI1iiII )
if 13 - 13: OoO0O00
if 70 - 70: I1Ii111 + O0 . oO0o * Ii1I
ii = "(not ether proto 0x806)"
I1Iiiiiii = " or (udp src port 4342 and ip[28] == 0x28)"
II111iIII1Ii = " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
if 19 - 19: oO0o * I1IiiI % i11iIiiIii
if 24 - 24: o0oOOo0O0Ooo
iIi1Iii111I = ""
IIi11i11 = ""
for i1oOOoo0o0OOOO in sources :
iIi1Iii111I += "{}" . format ( i1oOOoo0o0OOOO )
if ( i1oOOoo0o0OOOO not in dyn_eids ) : IIi11i11 += "{}" . format ( i1oOOoo0o0OOOO )
if ( sources [ - 1 ] == i1oOOoo0o0OOOO ) : break
iIi1Iii111I += " or "
if ( i1oOOoo0o0OOOO not in dyn_eids ) : IIi11i11 += " or "
if 18 - 18: iIii1I11I1II1 + I11i * I1IiiI - OOooOOo / I1IiiI
if ( IIi11i11 [ - 4 : : ] == " or " ) : IIi11i11 = IIi11i11 [ 0 : - 4 ]
if 78 - 78: I11i . IiII
if 38 - 38: OoOoOO00 + IiII
if 15 - 15: Oo0Ooo + I11i . ooOoO0o - iIii1I11I1II1 / O0 % iIii1I11I1II1
if 86 - 86: I1IiiI / oO0o * Ii1I
if 64 - 64: ooOoO0o / O0 * OoOoOO00 * ooOoO0o
if 60 - 60: I11i / i1IIi % I1ii11iIi11i / I1ii11iIi11i * I1ii11iIi11i . i11iIiiIii
o0oOO00 = commands . getoutput ( "egrep 'lisp-nat = yes' ./lisp.config" )
o0oOO00 = ( o0oOO00 != "" and o0oOO00 [ 0 ] == " " )
ii11iiIi = lisp . lisp_get_loopback_address ( ) if ( o0oOO00 ) else None
if 48 - 48: IiII % I11i
i1I1III1i1i = ""
i1I11 = lisp . lisp_get_all_addresses ( )
for iIi1Ii in i1I11 :
if ( iIi1Ii == ii11iiIi ) : continue
i1I1III1i1i += "{}" . format ( iIi1Ii )
if ( i1I11 [ - 1 ] == iIi1Ii ) : break
i1I1III1i1i += " or "
if 9 - 9: O0 % OOooOOo * iIii1I11I1II1 * oO0o + OoooooooOO + I1ii11iIi11i
if 7 - 7: ooOoO0o / iIii1I11I1II1 / I1Ii111 + ooOoO0o - i1IIi
if ( iIi1Iii111I != "" ) :
iIi1Iii111I = " and (src net {})" . format ( iIi1Iii111I )
if 75 - 75: II111iiii + OOooOOo
if ( IIi11i11 != "" ) :
IIi11i11 = " and not (dst net {})" . format ( IIi11i11 )
if 28 - 28: I1IiiI
if ( i1I1III1i1i != "" ) :
i1I1III1i1i = " and not (dst host {})" . format ( i1I1III1i1i )
if 49 - 49: I11i . o0oOOo0O0Ooo % oO0o / Ii1I
if 95 - 95: O0 * OoOoOO00 * IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
if 77 - 77: Oo0Ooo
if ( pitr ) :
IIi11i11 = ""
i1I1III1i1i = i1I1III1i1i . replace ( "dst " , "" )
if 46 - 46: I1Ii111
if 72 - 72: iII111i * OOooOOo
if 67 - 67: i1IIi
if 5 - 5: II111iiii . OoooooooOO
if 57 - 57: I1IiiI
i1I11IiI1iiII = ii + iIi1Iii111I + IIi11i11 + i1I1III1i1i
i1I11IiI1iiII += I1Iiiiiii
i1I11IiI1iiII += II111iIII1Ii
if 35 - 35: OoooooooOO - I1Ii111 / OoO0O00
lisp . lprint ( "Using pcap filter: '{}'" . format ( i1I11IiI1iiII ) )
return ( i1I11IiI1iiII )
if 50 - 50: OoOoOO00
if 33 - 33: I11i
if 98 - 98: OoOoOO00 % II111iiii
if 95 - 95: iIii1I11I1II1 - I1Ii111 - OOooOOo + I1Ii111 % I1ii11iIi11i . I1IiiI
if 41 - 41: O0 + oO0o . i1IIi - II111iiii * o0oOOo0O0Ooo . OoO0O00
if 68 - 68: o0oOOo0O0Ooo
if 20 - 20: I1Ii111 - I1Ii111
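# Per-interface capture thread: open a pcappy live capture (serialized by the
# lock), apply the filter, and loop forever feeding frames to the pcap
# callback above.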
def iIi1 ( device , pfilter , pcap_lock ) :
lisp . lisp_set_exception ( )
if 37 - 37: IiII
pcap_lock . acquire ( )
iI11i = pcappy . open_live ( device , 9000 , 0 , 100 )
pcap_lock . release ( )
if 73 - 73: iII111i * iII111i / ooOoO0o
iI11i . filter = pfilter
iI11i . loop ( - 1 , O0oOOo0o , device )
return
if 43 - 43: I1ii11iIi11i . i1IIi . IiII + O0 * Ii1I * O0
if 41 - 41: I1ii11iIi11i + Ii1I % OoooooooOO . I1ii11iIi11i + iII111i . iII111i
if 31 - 31: i11iIiiIii + II111iiii . iII111i * OoOoOO00
if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
if 87 - 87: OOooOOo + o0oOOo0O0Ooo . iII111i - OoooooooOO
if 6 - 6: iIii1I11I1II1 * OoooooooOO
if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
if 52 - 52: O0 / o0oOOo0O0Ooo % iII111i * I1IiiI % OOooOOo
if 69 - 69: I1ii11iIi11i
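# Periodic Info-Request sender used for NAT traversal; reschedules itself
# every LISP_INFO_INTERVAL.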
def oOOO0ooo ( ) :
global I11
global II1Ii1iI1i
global II1iII1i
if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
lisp . lisp_set_exception ( )
if 48 - 48: iII111i + IiII
if 60 - 60: I11i + iII111i . IiII / i1IIi . iIii1I11I1II1
if 14 - 14: OOooOOo
if 79 - 79: Ii1I
if 76 - 76: iIii1I11I1II1
Ooi111i1iIi1 = [ II1Ii1iI1i , II1Ii1iI1i ,
oO0oIIII ]
lisp . lisp_build_info_requests ( Ooi111i1iIi1 , None , lisp . LISP_CTRL_PORT )
if 95 - 95: OoooooooOO + I11i - I1ii11iIi11i / I1ii11iIi11i . i1IIi . OoooooooOO
if 29 - 29: ooOoO0o - i1IIi . I11i - I1ii11iIi11i + ooOoO0o + OoooooooOO
if 36 - 36: i1IIi / ooOoO0o . iIii1I11I1II1
if 12 - 12: Ii1I
I11 . cancel ( )
I11 = threading . Timer ( lisp . LISP_INFO_INTERVAL ,
oOOO0ooo , [ ] )
I11 . start ( )
return
if 71 - 71: I1IiiI . II111iiii . I1IiiI - ooOoO0o
if 45 - 45: IiII / O0 / OoOoOO00 * OOooOOo
if 18 - 18: iIii1I11I1II1 + OOooOOo + iIii1I11I1II1 . I1ii11iIi11i + I1Ii111 . ooOoO0o
if 7 - 7: I1ii11iIi11i + iIii1I11I1II1 * I11i * I11i / II111iiii - Ii1I
if 65 - 65: oO0o + OoOoOO00 + II111iiii
if 77 - 77: II111iiii
if 50 - 50: O0 . O0 . ooOoO0o % Oo0Ooo
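# "lisp map-resolver" command handler: record the map-resolver, schedule a
# one-shot Map-Resolver test, and start the Info-Request timer.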
def ooo000oOO ( kv_pair ) :
global II1iII1i
global iiI1iIiI
global I11
if 27 - 27: o0oOOo0O0Ooo * i11iIiiIii * OoO0O00
lispconfig . lisp_map_resolver_command ( kv_pair )
if 92 - 92: Oo0Ooo / i11iIiiIii + I1ii11iIi11i
if ( lisp . lisp_test_mr_timer == None or
lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
[ II1iII1i , iiI1iIiI ] )
lisp . lisp_test_mr_timer . start ( )
if 87 - 87: OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
I11 = threading . Timer ( 0 , oOOO0ooo , [ ] )
I11 . start ( )
return
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
if 21 - 21: OoOoOO00 - OoooooooOO % i11iIiiIii
if 71 - 71: i1IIi - I11i * I1Ii111 + oO0o - OoO0O00 % I1ii11iIi11i
if 63 - 63: iIii1I11I1II1 + OOooOOo . OoO0O00 / I1IiiI
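# "lisp database-mapping" command handler: delegate to lispconfig.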
def oO0O ( kv_pair ) :
lispconfig . lisp_database_mapping_command ( kv_pair )
return
if 26 - 26: iIii1I11I1II1 + i1IIi / OoOoOO00 % I1ii11iIi11i
if 44 - 44: OoooooooOO . II111iiii . OOooOOo % OoooooooOO
if 86 - 86: i11iIiiIii + O0 * IiII - OoO0O00 * OOooOOo + O0
if 95 - 95: iIii1I11I1II1 . I1Ii111 % iII111i - I1Ii111 * II111iiii
if 89 - 89: iII111i . I1IiiI
if 59 - 59: i1IIi % iIii1I11I1II1 + OoooooooOO
if 97 - 97: I1ii11iIi11i / Oo0Ooo + I1Ii111
if 32 - 32: ooOoO0o % I1Ii111 * Oo0Ooo
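# "lisp xtr-parameters" command handler: apply the settings, start the
# RLOC-probe timer if probing or NAT traversal was just enabled, record the
# crypto ephemeral port, and push the xTR parameters to the external data
# plane.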
def O0O000oOo0O ( kv_pair ) :
global i111I
if 82 - 82: IiII
if 86 - 86: Oo0Ooo * II111iiii * O0
if 83 - 83: IiII / I1Ii111
if 64 - 64: OoO0O00 % IiII . I1Ii111 % OoO0O00 + I11i * IiII
if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
OoOOoooO000 = lisp . lisp_nat_traversal
OoO0o000oOo = lisp . lisp_rloc_probing
if 88 - 88: i1IIi * I1Ii111 * oO0o - ooOoO0o * I11i / OoooooooOO
if 41 - 41: O0 / I1Ii111 + iIii1I11I1II1
if 72 - 72: OoOoOO00 * iIii1I11I1II1 % I11i
if 20 - 20: II111iiii % iIii1I11I1II1 + oO0o * II111iiii * OoO0O00 % OoO0O00
lispconfig . lisp_xtr_command ( kv_pair )
if 15 - 15: oO0o / I1Ii111
if 37 - 37: i11iIiiIii + I1IiiI . OOooOOo % I11i % I11i
if 26 - 26: O0
if 34 - 34: ooOoO0o * I1Ii111
OooOoOO0OO = ( OoOOoooO000 == False and lisp . lisp_nat_traversal and lisp . lisp_rloc_probing )
if 27 - 27: IiII * I1IiiI . iIii1I11I1II1 - iIii1I11I1II1
i111i1I1ii1i = ( OoO0o000oOo == False and lisp . lisp_rloc_probing )
if 100 - 100: IiII . Ii1I - iIii1I11I1II1 . i11iIiiIii / II111iiii
o0oO0OO00oo0o = 0
if ( i111i1I1ii1i ) : o0oO0OO00oo0o = 1
if ( OooOoOO0OO ) : o0oO0OO00oo0o = 5
if 17 - 17: IiII / I1ii11iIi11i - o0oOOo0O0Ooo * I1ii11iIi11i
if ( o0oO0OO00oo0o != 0 ) :
i11i11II11i = [ i111I , i111I ]
lisp . lisp_start_rloc_probe_timer ( o0oO0OO00oo0o , i11i11II11i )
if 9 - 9: OoOoOO00 - I1ii11iIi11i * ooOoO0o . ooOoO0o - I1IiiI
if 74 - 74: I1ii11iIi11i * i11iIiiIii / I1IiiI - O0 . ooOoO0o
if 39 - 39: ooOoO0o / O0 * IiII
if 17 - 17: Ii1I / iIii1I11I1II1 - OoO0O00 + I1IiiI % OOooOOo
if 14 - 14: o0oOOo0O0Ooo % IiII + I1ii11iIi11i + OoO0O00
if 76 - 76: OoO0O00 - i11iIiiIii + OoOoOO00 + OOooOOo / OoooooooOO
if 50 - 50: II111iiii - I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1
if ( lisp . lisp_crypto_ephem_port == None and lisp . lisp_data_plane_security ) :
i1IiII1III = i111I . getsockname ( ) [ 1 ]
lisp . lisp_crypto_ephem_port = i1IiII1III
lisp . lprint ( "Use port {} for lisp-crypto packets" . format ( i1IiII1III ) )
OoooooOo = { "type" : "itr-crypto-port" , "port" : i1IiII1III }
lisp . lisp_write_to_dp_socket ( OoooooOo )
if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
if 88 - 88: o0oOOo0O0Ooo
if 1 - 1: OoooooooOO
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
if 48 - 48: ooOoO0o * OoOoOO00 - ooOoO0o - OOooOOo + OOooOOo
if 40 - 40: i11iIiiIii . iIii1I11I1II1
if 2 - 2: i1IIi * oO0o - oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
if 3 - 3: OoooooooOO
if 71 - 71: IiII + i1IIi - iII111i - i11iIiiIii . I11i - ooOoO0o
if 85 - 85: I1ii11iIi11i - OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
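# Handle "nonce%" IPC messages from the lisp-etr process: an "R" entry records
# a received request-nonce and starts echo-nonce mode; an "E" entry records an
# echoed nonce and, when it matches the one we sent, stops request-nonce mode.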
def OOo00ooOoO0o ( ipc ) :
i1i1iiIIiiiII , Ii1I1 , OO0ooO0 , ooO000O = ipc . split ( "%" )
ooO000O = int ( ooO000O , 16 )
if 95 - 95: iIii1I11I1II1 . IiII - OoooooooOO * OoO0O00 / o0oOOo0O0Ooo
oOo0OO0o0 = lisp . lisp_get_echo_nonce ( None , OO0ooO0 )
if ( oOo0OO0o0 == None ) : oOo0OO0o0 = lisp . lisp_echo_nonce ( OO0ooO0 )
if 35 - 35: Oo0Ooo . Oo0Ooo % OoooooooOO - Ii1I
if 43 - 43: OoO0O00 % OoO0O00
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if ( Ii1I1 == "R" ) :
oOo0OO0o0 . request_nonce_rcvd = ooO000O
oOo0OO0o0 . last_request_nonce_rcvd = lisp . lisp_get_timestamp ( )
oOo0OO0o0 . echo_nonce_sent = ooO000O
oOo0OO0o0 . last_new_echo_nonce_sent = lisp . lisp_get_timestamp ( )
lisp . lprint ( "Start echo-nonce mode for {}, nonce 0x{}" . format ( lisp . red ( oOo0OO0o0 . rloc_str , False ) , lisp . lisp_hex_string ( ooO000O ) ) )
if 45 - 45: Ii1I - OOooOOo
if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
if 10 - 10: Ii1I - i11iIiiIii . I1ii11iIi11i % i1IIi
if ( Ii1I1 == "E" ) :
oOo0OO0o0 . echo_nonce_rcvd = ooO000O
oOo0OO0o0 . last_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
if 78 - 78: iIii1I11I1II1 * Oo0Ooo . Oo0Ooo - OOooOOo . iIii1I11I1II1
if ( oOo0OO0o0 . request_nonce_sent == ooO000O ) :
I111I1I = lisp . bold ( "echoed nonce" , False )
lisp . lprint ( "Received {} {} from {}" . format ( I111I1I ,
lisp . lisp_hex_string ( ooO000O ) ,
lisp . red ( oOo0OO0o0 . rloc_str , False ) ) )
if 54 - 54: II111iiii + I11i % I11i % o0oOOo0O0Ooo
oOo0OO0o0 . request_nonce_sent = None
lisp . lprint ( "Stop request-nonce mode for {}" . format ( lisp . red ( oOo0OO0o0 . rloc_str , False ) ) )
if 25 - 25: iII111i - Oo0Ooo
oOo0OO0o0 . last_good_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
else :
Iii1IIIIIII = "none"
if ( oOo0OO0o0 . request_nonce_sent ) :
Iii1IIIIIII = lisp . lisp_hex_string ( oOo0OO0o0 . request_nonce_sent )
if 27 - 27: OoO0O00 + OoOoOO00 * ooOoO0o
lisp . lprint ( ( "Received echo-nonce 0x{} from {}, but request-" + "nonce is {}" ) . format ( lisp . lisp_hex_string ( ooO000O ) ,
lisp . red ( oOo0OO0o0 . rloc_str , False ) , Iii1IIIIIII ) )
if 75 - 75: ooOoO0o + OoO0O00 - I1ii11iIi11i . OoooooooOO . ooOoO0o + I1IiiI
if 49 - 49: I1ii11iIi11i . IiII . i1IIi * OoOoOO00 % iIii1I11I1II1
return
if 35 - 35: I1ii11iIi11i + I1Ii111 - OoOoOO00 % oO0o % o0oOOo0O0Ooo % OoOoOO00
if 45 - 45: I1IiiI * OOooOOo % OoO0O00
if 24 - 24: ooOoO0o - I11i * oO0o
if 87 - 87: Ii1I - I1ii11iIi11i % I1ii11iIi11i . oO0o / I1ii11iIi11i
if 6 - 6: OoOoOO00 / iIii1I11I1II1 * OoooooooOO * i11iIiiIii
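# Command dispatch table: maps lisp.config clauses and "show" commands to
# their handler functions and the parameters each accepts.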
o0O0OOo0oO = {
"lisp xtr-parameters" : [ O0O000oOo0O , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-xtr" : [ True , "yes" , "no" ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"multi-tenant-eid" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-device" : [ True ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-resolver" : [ ooo000oOO , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp database-mapping" : [ oO0O , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp itr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp geo-coordinates" : [ lispconfig . lisp_geo_command , {
"geo-name" : [ False ] ,
"geo-tag" : [ False ] } ] ,
"show itr-map-cache" : [ IIiiIiI1 , { } ] ,
"show itr-rloc-probing" : [ I1i1iii , { } ] ,
"show itr-keys" : [ oo , { } ] ,
"show itr-dynamic-eid" : [ lispconfig . lisp_show_dynamic_eid_command , { } ]
}
if 42 - 42: II111iiii / O0 . iIii1I11I1II1 / O0 / OoO0O00 / OoooooooOO
if 62 - 62: O0 . Oo0Ooo
if 33 - 33: Oo0Ooo / iIii1I11I1II1 % i1IIi
if 76 - 76: Ii1I + iIii1I11I1II1 + OoOoOO00 . OoO0O00
if 49 - 49: IiII / ooOoO0o / OOooOOo
if 25 - 25: I1IiiI % O0 + i1IIi - ooOoO0o
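# Main: start the ITR, then select() over the ephemeral, control, and IPC
# sockets, dispatching Map-Replies and RLOC-probe replies to the control
# plane, punts from the external data plane, and CLI/API commands until the
# process is told to exit.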
if ( I111I1Iiii1i ( ) == False ) :
lisp . lprint ( "lisp_itr_startup() failed" )
lisp . lisp_print_banner ( "ITR abnormal exit" )
exit ( 1 )
if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii
if 94 - 94: iII111i - Oo0Ooo + oO0o
O0oooOoO = [ i111I , oO0oIIII ,
II1Ii1iI1i , Oo0oO0oo0oO00 ]
if 62 - 62: OOooOOo / II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I
if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
if 36 - 36: OOooOOo % i11iIiiIii
Iiii1Ii = True
ooOOo00oo0 = [ i111I ] * 3
IIIII1Ii = [ II1Ii1iI1i ] * 3
if 13 - 13: II111iiii
while ( True ) :
try : o0o000Oo , oO0o0O0o0OO00 , i1i1iiIIiiiII = select . select ( O0oooOoO , [ ] , [ ] )
except : break
if 23 - 23: OoO0O00 + i11iIiiIii
if 20 - 20: I1ii11iIi11i
if 3 - 3: OoO0O00 * i1IIi . I1IiiI . O0 - OoOoOO00
if 81 - 81: I1IiiI - iIii1I11I1II1 / I1IiiI / O0
if ( lisp . lisp_ipc_data_plane and Oo0oO0oo0oO00 in o0o000Oo ) :
lisp . lisp_process_punt ( Oo0oO0oo0oO00 , II1iII1i ,
iiI1iIiI )
if 34 - 34: Ii1I * Ii1I - I1ii11iIi11i - O0 . i11iIiiIii
if 32 - 32: iIii1I11I1II1 . OoO0O00 * oO0o / OOooOOo . II111iiii - Oo0Ooo
if 10 - 10: I1ii11iIi11i / i11iIiiIii - Ii1I + oO0o * I1IiiI
if 94 - 94: I1IiiI + iIii1I11I1II1 / O0 - OoooooooOO % I1ii11iIi11i
if 64 - 64: I11i + OoO0O00
if ( i111I in o0o000Oo ) :
Ii1I1 , i1oOOoo0o0OOOO , i1IiII1III , Ii = lisp . lisp_receive ( ooOOo00oo0 [ 0 ] ,
False )
if ( i1oOOoo0o0OOOO == "" ) : break
if 44 - 44: I1IiiI % Ii1I * I1IiiI . Oo0Ooo + I1ii11iIi11i . OOooOOo
if ( lisp . lisp_is_rloc_probe_reply ( Ii [ 0 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe reply, using pcap" )
continue
if 6 - 6: IiII * OoooooooOO + I1Ii111 / Ii1I
lisp . lisp_parse_packet ( ooOOo00oo0 , Ii , i1oOOoo0o0OOOO , i1IiII1III )
if 35 - 35: ooOoO0o % I1IiiI - ooOoO0o - OoO0O00 - OoooooooOO
if 46 - 46: i1IIi . i1IIi . oO0o / I11i / ooOoO0o
if 34 - 34: OoooooooOO / Oo0Ooo * i11iIiiIii . II111iiii . OoooooooOO
if 59 - 59: i11iIiiIii . OoooooooOO / I11i * I1ii11iIi11i + OoooooooOO
if 3 - 3: i11iIiiIii * Oo0Ooo % iIii1I11I1II1 % I1IiiI * iII111i / OOooOOo
if ( II1Ii1iI1i in o0o000Oo ) :
Ii1I1 , i1oOOoo0o0OOOO , i1IiII1III , Ii = lisp . lisp_receive ( IIIII1Ii [ 0 ] ,
False )
if ( i1oOOoo0o0OOOO == "" ) : break
if 95 - 95: IiII * O0 * I1Ii111 . OoooooooOO % Oo0Ooo + I1ii11iIi11i
if ( lisp . lisp_is_rloc_probe_reply ( Ii [ 0 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe reply, using pcap" )
continue
if 98 - 98: oO0o . OoooooooOO
Oo000 = lisp . lisp_parse_packet ( IIIII1Ii , Ii , i1oOOoo0o0OOOO , i1IiII1III )
if 97 - 97: O0 / OOooOOo + o0oOOo0O0Ooo . oO0o % OoOoOO00 - OoOoOO00
if 33 - 33: I11i % II111iiii + OoO0O00
if 93 - 93: i1IIi . IiII / I1IiiI + IiII
if 58 - 58: I1ii11iIi11i + O0 . Oo0Ooo + OoOoOO00 - OoO0O00 - OoOoOO00
if 41 - 41: Oo0Ooo / i1IIi / Oo0Ooo - iII111i . o0oOOo0O0Ooo
if ( Oo000 ) :
i11i11II11i = [ i111I , i111I ]
lisp . lisp_start_rloc_probe_timer ( 0 , i11i11II11i )
if 65 - 65: O0 * i11iIiiIii . OoooooooOO / I1IiiI / iII111i
if 69 - 69: ooOoO0o % ooOoO0o
if 76 - 76: i11iIiiIii * iII111i / OoO0O00 % I1ii11iIi11i + OOooOOo
if 48 - 48: iIii1I11I1II1 % i1IIi + OoOoOO00 % o0oOOo0O0Ooo
if 79 - 79: OoOoOO00 % I1IiiI % Ii1I / i1IIi % OoO0O00
if 56 - 56: iIii1I11I1II1 - i11iIiiIii * iII111i
if 84 - 84: OOooOOo + Ii1I + o0oOOo0O0Ooo
if ( oO0oIIII in o0o000Oo ) :
Ii1I1 , i1oOOoo0o0OOOO , i1IiII1III , Ii = lisp . lisp_receive ( oO0oIIII , True )
if 33 - 33: Ii1I
if ( i1oOOoo0o0OOOO == "" ) : break
if 93 - 93: ooOoO0o
if ( Ii1I1 == "command" ) :
if ( Ii == "clear" ) :
lisp . lisp_clear_map_cache ( )
continue
if 34 - 34: oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
if ( Ii . find ( "nonce%" ) != - 1 ) :
OOo00ooOoO0o ( Ii )
continue
if 19 - 19: I1ii11iIi11i
lispconfig . lisp_process_command ( oO0oIIII , Ii1I1 ,
Ii , "lisp-itr" , [ o0O0OOo0oO ] )
elif ( Ii1I1 == "api" ) :
lisp . lisp_process_api ( "lisp-itr" , oO0oIIII , Ii )
elif ( Ii1I1 == "data-packet" ) :
IIIii ( Ii , "ipc" )
else :
if ( lisp . lisp_is_rloc_probe_reply ( Ii [ 0 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe request, using pcap" )
continue
if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
lisp . lisp_parse_packet ( II1iII1i , Ii , i1oOOoo0o0OOOO , i1IiII1III )
if 66 - 66: O0
if 52 - 52: OoO0O00 * OoooooooOO
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
if 28 - 28: iIii1I11I1II1
I1II ( )
lisp . lisp_print_banner ( "ITR normal exit" )
exit ( 0 )
if 7 - 7: o0oOOo0O0Ooo % IiII * OoOoOO00
if 58 - 58: IiII / I11i + II111iiii % iII111i - OoooooooOO
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
launcher.py
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import signal
import subprocess
import threading
import shlex
from esrally import config, time, exceptions, client
from esrally.mechanic import telemetry, cluster
from esrally.utils import process, jvm
def wait_for_rest_layer(es, max_attempts=20):
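    # Poll the cluster info endpoint once per second until it answers. 503 and
    # connection errors (cluster presumably still forming) as well as 401 (security
    # presumably still initializing) are retried; any other error is re-raised.
    # Returns True on success and False once max_attempts polls have been exhausted.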
for attempt in range(max_attempts):
import elasticsearch
try:
es.info()
return True
except elasticsearch.TransportError as e:
if e.status_code == 503 or isinstance(e, elasticsearch.ConnectionError):
time.sleep(1)
elif e.status_code == 401:
time.sleep(1)
else:
raise e
return False
class ClusterLauncher:
"""
The cluster launcher performs cluster-wide tasks that need to be done in the startup / shutdown phase.
"""
def __init__(self, cfg, metrics_store, client_factory_class=client.EsClientFactory):
"""
Creates a new ClusterLauncher.
:param cfg: The config object.
:param metrics_store: A metrics store that is configured to receive system metrics.
:param client_factory_class: A factory class that can create an Elasticsearch client.
"""
self.cfg = cfg
self.metrics_store = metrics_store
self.client_factory = client_factory_class
self.logger = logging.getLogger(__name__)
def start(self):
"""
Performs final startup tasks.
Precondition: All cluster nodes have been started.
Postcondition: The cluster is ready to receive HTTP requests or a ``LaunchError`` is raised.
:return: A representation of the launched cluster.
"""
enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")
telemetry_params = self.cfg.opts("mechanic", "telemetry.params")
all_hosts = self.cfg.opts("client", "hosts").all_hosts
default_hosts = self.cfg.opts("client", "hosts").default
preserve = self.cfg.opts("mechanic", "preserve.install")
es = {}
for cluster_name, cluster_hosts in all_hosts.items():
all_client_options = self.cfg.opts("client", "options").all_client_options
cluster_client_options = dict(all_client_options[cluster_name])
# Use retries to avoid aborts on long living connections for telemetry devices
cluster_client_options["retry-on-timeout"] = True
es[cluster_name] = self.client_factory(cluster_hosts, cluster_client_options).create()
es_default = es["default"]
t = telemetry.Telemetry(enabled_devices, devices=[
telemetry.NodeStats(telemetry_params, es, self.metrics_store),
telemetry.ClusterMetaDataInfo(es_default),
telemetry.ClusterEnvironmentInfo(es_default, self.metrics_store),
telemetry.GcTimesSummary(es_default, self.metrics_store),
telemetry.IndexStats(es_default, self.metrics_store),
telemetry.MlBucketProcessingTime(es_default, self.metrics_store),
telemetry.CcrStats(telemetry_params, es, self.metrics_store)
])
# The list of nodes will be populated by ClusterMetaDataInfo, so no need to do it here
c = cluster.Cluster(default_hosts, [], t, preserve)
self.logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
if wait_for_rest_layer(es_default, max_attempts=40):
self.logger.info("REST API is available. Attaching telemetry devices to cluster.")
t.attach_to_cluster(c)
self.logger.info("Telemetry devices are now attached to the cluster.")
else:
# Just stop the cluster here and raise. The caller is responsible for terminating individual nodes.
self.logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
self.stop(c)
raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
return c
def stop(self, c):
"""
Performs cleanup tasks. This method should be called before nodes are shut down.
:param c: The cluster that is about to be stopped.
"""
c.telemetry.detach_from_cluster(c)
class StartupWatcher:
def __init__(self, node_name, server, startup_event):
self.node_name = node_name
self.server = server
self.startup_event = startup_event
self.logger = logging.getLogger(__name__)
def watch(self):
"""
Reads the output from the ES (node) subprocess.
"""
lines_to_log = 0
while True:
line = self.server.stdout.readline().decode("utf-8")
if len(line) == 0:
self.logger.info("%s (stdout): No more output. Process has likely terminated.", self.node_name)
self.await_termination(self.server)
self.startup_event.set()
break
line = line.rstrip()
# if an error occurs, log the next few lines
if "error" in line.lower():
lines_to_log = 10
# don't log each output line as it is contained in the node's log files anyway and we just risk spamming our own log.
if not self.startup_event.isSet() or lines_to_log > 0:
self.logger.info("%s (stdout): %s", self.node_name, line)
lines_to_log -= 1
# no need to check as soon as we have detected node startup
if not self.startup_event.isSet():
if line.find("Initialization Failed") != -1 or line.find("A fatal exception has occurred") != -1:
self.logger.error("[%s] encountered initialization errors.", self.node_name)
# wait a moment to ensure the process has terminated before we signal that we detected a (failed) startup.
self.await_termination(self.server)
self.startup_event.set()
if line.endswith("started") and not self.startup_event.isSet():
self.startup_event.set()
self.logger.info("[%s] has successfully started.", self.node_name)
def await_termination(self, server, timeout=5):
# wait a moment to ensure the process has terminated
wait = timeout
        while not server.returncode and wait > 0:
time.sleep(0.1)
server.poll()
wait -= 1
def _start(process, node_name):
log = logging.getLogger(__name__)
startup_event = threading.Event()
watcher = StartupWatcher(node_name, process, startup_event)
t = threading.Thread(target=watcher.watch)
t.setDaemon(True)
t.start()
if startup_event.wait(timeout=InProcessLauncher.PROCESS_WAIT_TIMEOUT_SECONDS):
process.poll()
# has the process terminated?
if process.returncode:
msg = "Node [%s] has terminated with exit code [%s]." % (node_name, str(process.returncode))
log.error(msg)
raise exceptions.LaunchError(msg)
else:
log.info("Started node [%s] with PID [%s].", node_name, process.pid)
return process
else:
msg = "Could not start node [%s] within timeout period of [%s] seconds." % (
node_name, InProcessLauncher.PROCESS_WAIT_TIMEOUT_SECONDS)
# check if the process has terminated already
process.poll()
if process.returncode:
msg += " The process has already terminated with exit code [%s]." % str(process.returncode)
else:
msg += " The process seems to be still running with PID [%s]." % process.pid
log.error(msg)
raise exceptions.LaunchError(msg)
class DockerLauncher:
# May download a Docker image and that can take some time
PROCESS_WAIT_TIMEOUT_SECONDS = 10 * 60
def __init__(self, cfg, metrics_store):
self.cfg = cfg
self.metrics_store = metrics_store
self.binary_paths = {}
self.node_name = None
self.keep_running = self.cfg.opts("mechanic", "keep.running")
self.logger = logging.getLogger(__name__)
def start(self, node_configurations):
nodes = []
for node_configuration in node_configurations:
node_name = node_configuration.node_name
host_name = node_configuration.ip
binary_path = node_configuration.binary_path
self.binary_paths[node_name] = binary_path
p = self._start_process(cmd="docker-compose -f %s up" % binary_path, node_name=node_name)
# only support a subset of telemetry for Docker hosts (specifically, we do not allow users to enable any devices)
node_telemetry = [
telemetry.DiskIo(self.metrics_store, len(node_configurations)),
telemetry.CpuUsage(self.metrics_store),
telemetry.NodeEnvironmentInfo(self.metrics_store)
]
t = telemetry.Telemetry(devices=node_telemetry)
nodes.append(cluster.Node(p, host_name, node_name, t))
return nodes
def _start_process(self, cmd, node_name):
return _start(subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL), node_name)
def stop(self, nodes):
if self.keep_running:
self.logger.info("Keeping Docker container running.")
else:
self.logger.info("Stopping Docker container")
for node in nodes:
node.telemetry.detach_from_node(node, running=True)
process.run_subprocess_with_logging("docker-compose -f %s down" % self.binary_paths[node.node_name])
node.telemetry.detach_from_node(node, running=False)
class ExternalLauncher:
def __init__(self, cfg, metrics_store, client_factory_class=client.EsClientFactory):
self.cfg = cfg
self.metrics_store = metrics_store
self.client_factory = client_factory_class
self.logger = logging.getLogger(__name__)
def start(self, node_configurations=None):
hosts = self.cfg.opts("client", "hosts").default
client_options = self.cfg.opts("client", "options").default
es = self.client_factory(hosts, client_options).create()
# cannot enable custom telemetry devices here
t = telemetry.Telemetry(devices=[
# This is needed to actually populate the nodes
telemetry.ClusterMetaDataInfo(es),
# will gather node specific meta-data for all nodes
telemetry.ExternalEnvironmentInfo(es, self.metrics_store),
])
# We create a pseudo-cluster here to get information about all nodes.
# cluster nodes will be populated by the external environment info telemetry device. We cannot know this upfront.
c = cluster.Cluster(hosts, [], t)
user_defined_version = self.cfg.opts("mechanic", "distribution.version", mandatory=False)
distribution_version = es.info()["version"]["number"]
if not user_defined_version or user_defined_version.strip() == "":
self.logger.info("Distribution version was not specified by user. Rally-determined version is [%s]", distribution_version)
self.cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", distribution_version)
elif user_defined_version != distribution_version:
self.logger.warning("Distribution version '%s' on command line differs from actual cluster version '%s'.",
user_defined_version, distribution_version)
t.attach_to_cluster(c)
return c.nodes
def stop(self, nodes):
# nothing to do here, externally provisioned clusters / nodes don't have any specific telemetry devices attached.
pass
class InProcessLauncher:
"""
Launcher is responsible for starting and stopping the benchmark candidate.
"""
PROCESS_WAIT_TIMEOUT_SECONDS = 90.0
def __init__(self, cfg, metrics_store, races_root_dir, clock=time.Clock):
self.cfg = cfg
self.metrics_store = metrics_store
self._clock = clock
self.races_root_dir = races_root_dir
self.keep_running = self.cfg.opts("mechanic", "keep.running")
self.override_runtime_jdk = self.cfg.opts("mechanic", "runtime.jdk")
self.logger = logging.getLogger(__name__)
def start(self, node_configurations):
# we're very specific which nodes we kill as there is potentially also an Elasticsearch based metrics store running on this machine
        # The only specific trait of a Rally-related process is that it is started "somewhere" in the races root directory.
#
# We also do this only once per host otherwise we would kill instances that we've just launched.
process.kill_running_es_instances(self.races_root_dir)
node_count_on_host = len(node_configurations)
return [self._start_node(node_configuration, node_count_on_host) for node_configuration in node_configurations]
def _start_node(self, node_configuration, node_count_on_host):
host_name = node_configuration.ip
node_name = node_configuration.node_name
car = node_configuration.car
binary_path = node_configuration.binary_path
data_paths = node_configuration.data_paths
node_telemetry_dir = "%s/telemetry" % node_configuration.node_root_path
java_major_version, java_home = self._resolve_java_home(car)
self.logger.info("Starting node [%s] based on car [%s].", node_name, car)
enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")
telemetry_params = self.cfg.opts("mechanic", "telemetry.params")
node_telemetry = [
telemetry.FlightRecorder(telemetry_params, node_telemetry_dir, java_major_version),
telemetry.JitCompiler(node_telemetry_dir),
telemetry.Gc(node_telemetry_dir, java_major_version),
telemetry.PerfStat(node_telemetry_dir),
telemetry.DiskIo(self.metrics_store, node_count_on_host),
telemetry.CpuUsage(self.metrics_store),
telemetry.NodeEnvironmentInfo(self.metrics_store),
telemetry.IndexSize(data_paths, self.metrics_store),
telemetry.MergeParts(self.metrics_store, node_configuration.log_path),
telemetry.StartupTime(self.metrics_store),
]
t = telemetry.Telemetry(enabled_devices, devices=node_telemetry)
env = self._prepare_env(car, node_name, java_home, t)
t.on_pre_node_start(node_name)
node_process = self._start_process(env, node_name, binary_path)
node = cluster.Node(node_process, host_name, node_name, t)
self.logger.info("Attaching telemetry devices to node [%s].", node_name)
t.attach_to_node(node)
return node
def _resolve_java_home(self, car):
runtime_jdk_versions = self._determine_runtime_jdks(car)
self.logger.info("Allowed JDK versions are %s.", runtime_jdk_versions)
major, java_home = jvm.resolve_path(runtime_jdk_versions)
self.logger.info("Detected JDK with major version [%s] in [%s].", major, java_home)
return major, java_home
def _determine_runtime_jdks(self, car):
if self.override_runtime_jdk:
return [self.override_runtime_jdk]
else:
runtime_jdks = car.mandatory_var("runtime.jdk")
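            # e.g. a value of "12,11,10" (hypothetical) is parsed to [12, 11, 10]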
try:
return [int(v) for v in runtime_jdks.split(",")]
except ValueError:
raise exceptions.SystemSetupError("Car config key \"runtime.jdk\" is invalid: \"{}\" (must be int)".format(runtime_jdks))
def _prepare_env(self, car, node_name, java_home, t):
env = {}
env.update(os.environ)
env.update(car.env)
self._set_env(env, "PATH", os.path.join(java_home, "bin"), separator=os.pathsep)
# Don't merge here!
env["JAVA_HOME"] = java_home
# we just blindly trust telemetry here...
for k, v in t.instrument_candidate_env(car, node_name).items():
self._set_env(env, k, v)
exit_on_oome_flag = "-XX:+ExitOnOutOfMemoryError"
if jvm.supports_option(java_home, exit_on_oome_flag):
self.logger.info("Setting [%s] to detect out of memory errors during the benchmark.", exit_on_oome_flag)
self._set_env(env, "ES_JAVA_OPTS", exit_on_oome_flag)
else:
self.logger.info("JVM does not support [%s]. A JDK upgrade is recommended.", exit_on_oome_flag)
self.logger.debug("env for [%s]: %s", node_name, str(env))
return env
def _set_env(self, env, k, v, separator=' '):
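        # Sets env[k] to v, or prepends v to the existing value using the given
        # separator (e.g. os.pathsep for PATH, a plain space for ES_JAVA_OPTS).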
if v is not None:
if k not in env:
env[k] = v
else: # merge
env[k] = v + separator + env[k]
def _start_process(self, env, node_name, binary_path):
if os.geteuid() == 0:
raise exceptions.LaunchError("Cannot launch Elasticsearch as root. Please run Rally as a non-root user.")
os.chdir(binary_path)
cmd = ["bin/elasticsearch"]
return _start(subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL, env=env), node_name)
def stop(self, nodes):
if self.keep_running:
self.logger.info("Keeping [%d] nodes on this host running.", len(nodes))
else:
self.logger.info("Shutting down [%d] nodes on this host.", len(nodes))
for node in nodes:
process = node.process
node_name = node.node_name
node.telemetry.detach_from_node(node, running=True)
if not self.keep_running:
stop_watch = self._clock.stop_watch()
stop_watch.start()
try:
os.kill(process.pid, signal.SIGINT)
process.wait(10.0)
self.logger.info("Done shutdown node [%s] in [%.1f] s.", node_name, stop_watch.split_time())
except ProcessLookupError:
self.logger.warning("No process found with PID [%s] for node [%s]", process.pid, node_name)
except subprocess.TimeoutExpired:
# kill -9
self.logger.warning("Node [%s] did not shut down after 10 seconds; now kill -QUIT node, to see threads:", node_name)
try:
os.kill(process.pid, signal.SIGQUIT)
except OSError:
self.logger.warning("No process found with PID [%s] for node [%s]", process.pid, node_name)
break
try:
process.wait(120.0)
self.logger.info("Done shutdown node [%s] in [%.1f] s.", node_name, stop_watch.split_time())
break
except subprocess.TimeoutExpired:
pass
self.logger.info("kill -KILL node [%s]", node_name)
try:
process.kill()
except ProcessLookupError:
self.logger.warning("No process found with PID [%s] for node [%s]", process.pid, node_name)
node.telemetry.detach_from_node(node, running=False)
|
Recognizer.py
|
import audioop
import collections
import math
import threading
import time
from danspeech.errors.recognizer_errors import ModelNotInitialized, WaitTimeoutError, WrongUsageOfListen, NoDataInBuffer
from danspeech.DanSpeechRecognizer import DanSpeechRecognizer
from danspeech.audio.resources import SpeechSource, AudioData
import numpy as np
class Recognizer(object):
"""
Recognizer Class, which represents a collection of speech recognition functionality.
None of the parameters are required, but you need to update the Recognizer with a valid model
before being able to perform speech recognition.
:param DeepSpeech model:
A valid DanSpeech model (``danspeech.deepspeech.model.DeepSpeech``)
See :ref:`pre-trained-models` for more information.
This can also be your custom DanSpeech trained model.
:param str lm:
A path (``str``) to a valid .klm language model. See :ref:`language-models` for a list
of pretrained available models.
:param bool with_gpu:
A ``bool`` representing whether you want to run the ``Recognizer`` with a GPU.
Note: Requires a GPU.
:param \**kwargs:
Additional decoder arguments. See :meth:`Recognizer.update_decoder` for more information.
:Example:
recognizer = Recognizer()
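    A fuller flow (hypothetical object names), assuming ``model`` is a loaded DanSpeech
    model and ``audio`` is audio loaded with :meth:`audio.load_audio`:
        recognizer = Recognizer(model=model)
        transcription = recognizer.recognize(audio)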
"""
def __init__(self, model=None, lm=None, with_gpu=False, **kwargs):
# Listening to a stream parameters
# minimum audio energy to consider for recording
self.energy_threshold = 1000
# seconds of non-speaking audio before a phrase is considered complete
self.pause_threshold = 0.8
# minimum seconds of speaking audio before we consider the speaking audio a phrase
# values below this are ignored (for filtering out clicks and pops)
self.phrase_threshold = 0.3
# seconds of non-speaking audio to keep on both sides of the recording
self.non_speaking_duration = 0.35
        # Seconds of audio required before we consider a clip an actual clip
self.mininum_required_speaking_seconds = 0.7
# Adjust energy params
self.dynamic_energy_threshold = True
self.dynamic_energy_adjustment_damping = 0.15
self.dynamic_energy_ratio = 1.5
self.danspeech_recognizer = DanSpeechRecognizer(with_gpu=with_gpu, **kwargs)
self.stream = False
self.stream_thread_stopper = None
if model:
self.update_model(model)
if lm:
if not model:
raise ModelNotInitialized("Trying to initialize language model without also choosing a DanSpeech "
"acoustic model.")
else:
self.update_decoder(lm=lm)
# Being able to bind the microphone to the recognizer is useful.
self.microphone = None
def recognize(self, audio_data, show_all=False):
"""
Performs speech recognition with the current initialized DanSpeech model
(``danspeech.deepspeech.model.DeepSpeech``).
:param array audio_data: ``Numpy array`` of audio data. Use :meth:`audio.load_audio` to load your audio
into a valid format.
:param bool show_all: Whether to return all beams from beam search, if decoding is
performed with a language model.
:return: The most likely transcription if ``show_all=false`` (the default). Otherwise, returns the
most likely beams from beam search with a language model.
:rtype: str or list[str] if ``show_all=True``.
"""
return self.danspeech_recognizer.transcribe(audio_data, show_all=show_all)
def update_model(self, model):
"""
Updates the DanSpeech model being used by the Recognizer.
:param model: A valid DanSpeech model (``danspeech.deepspeech.model.DeepSpeech``).
See :ref:`pre-trained-models` for a list of pretrained available models.
This can also be your custom DanSpeech trained model.
"""
self.danspeech_recognizer.update_model(model)
print("DanSpeech model updated to: {0}".format(model.model_name))
def update_decoder(self, lm=None, alpha=None, beta=None, beam_width=None):
"""
Updates the decoder being used by the Recognizer. By default, greedy decoding of the DanSpeech
model will be performed.
If lm is None or lm="greedy", then the decoding will be performed by greedy decoding, and the alpha, beta and
beam width parameters are therefore ignored.
**Warning:** Language models requires the `ctc-decode <https://github.com/parlance/ctcdecode.git>`_
python package to work.
:param str lm: A path to a valid .klm language model. See :ref:`language-models` for a list
of pretrained available models.
:param float alpha: Alpha parameter of beam search decoding. If None, then the default parameter of
``alpha=1.3`` is used
:param float beta: Beta parameter of beam search decoding. If None, then the default parameter of
``beta=0.2`` is used
:param int beam_width: Beam width of beam search decoding. If None, then the default parameter of
``beam_width=64`` is used.
"""
self.danspeech_recognizer.update_decoder(lm=lm, alpha=alpha, beta=beta, beam_width=beam_width)
print("DanSpeech decoder updated ") # ToDO: Include model name
def listen(self, source, timeout=None, phrase_time_limit=None):
"""
Source: https://github.com/Uberi/speech_recognition/blob/master/speech_recognition/__init__.py
Modified for DanSpeech.
Listens to a stream of audio.
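        Blocks until a single phrase has been captured: recording starts once the signal
        energy exceeds ``energy_threshold`` and ends after ``pause_threshold`` seconds of
        silence, keeping roughly ``non_speaking_duration`` seconds of surrounding silence.
        Returns the captured phrase as an ``AudioData`` instance.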
"""
assert isinstance(source, SpeechSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before listening, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = float(source.chunk) / source.sampling_rate
pause_buffer_count = int(math.ceil(
self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio during a phrase, before the phrase should be considered complete
phrase_buffer_count = int(math.ceil(
self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
non_speaking_buffer_count = int(math.ceil(
self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after a phrase
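        # Worked example (hypothetical figures): with a 1024-sample chunk at 16 kHz,
        # seconds_per_buffer is 0.064 s, giving pause_buffer_count = ceil(0.8 / 0.064) = 13,
        # phrase_buffer_count = ceil(0.3 / 0.064) = 5 and non_speaking_buffer_count = ceil(0.35 / 0.064) = 6.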
# read audio input for phrases until there is a phrase that is long enough
elapsed_time = 0 # number of seconds of audio read
while True:
frames = collections.deque()
# store audio input until the phrase starts
while True:
# handle waiting too long for phrase by raising an exception
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout:
raise WaitTimeoutError("listening timed out while waiting for phrase to start")
buffer = source.stream.read(source.chunk)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
if len(
frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers
frames.popleft()
# detect whether speaking has started on audio input
energy = audioop.rms(buffer, source.sampling_width) # energy of the audio signal
if energy > self.energy_threshold: break
# dynamically adjust the energy threshold using asymmetric weighted average
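                # The damping factor is exponentiated by the buffer duration so the
                # moving average adapts at the same real-time rate regardless of chunk size.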
if self.dynamic_energy_threshold:
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
# read audio input until the phrase ends
pause_count, phrase_count = 0, 0
phrase_start_time = elapsed_time
while True:
# handle phrase being too long by cutting off the audio
elapsed_time += seconds_per_buffer
if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit:
break
buffer = source.stream.read(source.chunk)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
phrase_count += 1
# check if speaking has stopped for longer than the pause threshold on the audio input
energy = audioop.rms(buffer, source.sampling_width) # unit energy of the audio signal within the buffer
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# check how long the detected phrase is, and retry listening if the phrase is too short
phrase_count -= pause_count # exclude the buffers for the pause before the phrase
if phrase_count >= phrase_buffer_count or len(
buffer) == 0: break # phrase is long enough or we've reached the end of the stream, so stop listening
# obtain frame data
for i in range(
pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end
frame_data = b"".join(frames)
return AudioData(frame_data, source.sampling_rate, source.sampling_width)
def listen_stream(self, source, timeout=None, phrase_time_limit=None):
"""
Adapted from: https://github.com/Uberi/speech_recognition/blob/master/speech_recognition/__init__.py
Generator used to listen to the audio from a source e.g. a microphone. This generator is used
by the streaming models.
        :param source: Source of audio. Needs to be a danspeech.audio.resources.SpeechSource instance
        :param timeout: Maximum number of seconds that this will wait until a phrase starts
        :param phrase_time_limit: Maximum number of seconds that a phrase is allowed to continue before stopping
        :return: An indicator of whether this is the last part of the stream, and the buffered audio data
"""
# ToDO: Change the assertions
assert isinstance(source, SpeechSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before listening, " \
"see documentation for ``AudioSource``; are you using " \
"``source`` outside of a ``with`` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = float(source.chunk) / source.sampling_rate
pause_buffer_count = int(math.ceil(
self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio during a phrase, before the phrase should be considered complete
phrase_buffer_count = int(math.ceil(
self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
non_speaking_buffer_count = int(math.ceil(
self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after a phrase
# read audio input for phrases until there is a phrase that is long enough
elapsed_time = 0 # number of seconds of audio read
buffer = []
while self.stream:
frames = []
# store audio input until the phrase starts
while True and self.stream:
# handle waiting too long for phrase by raising an exception
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout:
raise WaitTimeoutError("listening timed out while waiting for phrase to start")
buffer = source.stream.read(source.chunk)
if len(buffer) == 0:
break # reached end of the stream
frames.append(buffer)
if len(frames) > non_speaking_buffer_count:
# ensure we only keep the needed amount of non-speaking buffers
frames.pop(0)
# detect whether speaking has started on audio input
energy = audioop.rms(buffer, source.sampling_width) # energy of the audio signal
if energy > self.energy_threshold:
break
# If streaming has stopped while looking for speech, break out of thread so it can stop
if not self.stream:
yield False, []
# Yield the silence in the beginning
yield False, frames
# read audio input until the phrase ends
pause_count, phrase_count = 0, 0
phrase_start_time = elapsed_time
while True:
buffer = source.stream.read(source.chunk)
if len(buffer) == 0:
break # reached end of the stream
# handle phrase being too long by cutting off the audio
elapsed_time += seconds_per_buffer
if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit:
break
phrase_count += 1
# check if speaking has stopped for longer than the pause threshold on the audio input
energy = audioop.rms(buffer, source.sampling_width) # unit energy of the audio signal within the buffer
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# If data is being processed
yield False, buffer
# check how long the detected phrase is, and retry listening if the phrase is too short
phrase_count -= pause_count # exclude the buffers for the pause before the phrase
if phrase_count >= phrase_buffer_count or len(buffer) == 0:
break # phrase is long enough or we've reached the end of the stream, so stop listening
# Ending of stream, should start a new stream
if len(buffer) == 0:
yield True, []
else:
yield True, buffer
        # If we get here, the stream generator was used incorrectly
        raise WrongUsageOfListen("Wrong usage of stream. Overwrite the listen generator with a new generator "
                                 "instance since this instance has completed a full listen.")
@staticmethod
def get_audio_data(frames, source):
"""
Function to convert the frames (bytes) from a stream to an array used for DanSpeech models
:param frames: Byte frames
:param source: Source of stream/frames
:return: Numpy array with speech data
"""
# obtain frame data
frame_data = b"".join(frames)
return AudioData(frame_data, source.sampling_rate, source.sampling_width).get_array_data()
def listen_in_background(self, source):
"""
Spawns a thread which listens to the source of data
:param source: Source of stream/frames
        :return: Stopper function used to stop the background thread, and a data_getter which returns
            the data gathered by the thread so far.
"""
assert isinstance(source, SpeechSource), "Source must be an audio source"
        # These act as global variables for the thread helper functions
running = [True]
data = []
def threaded_listen():
# Thread to run in background
with source as s:
while running[0]:
generator = self.listen_stream(s)
try: # Listen until stream detects silence
while True:
is_last_, temp = next(generator)
if isinstance(temp, list):
temp = self.get_audio_data(temp, source)
else:
temp = self.get_audio_data([temp], source)
# Append data
data.append((is_last_, temp))
# If is last, we start new listen generator
if is_last_:
break
except WaitTimeoutError: # listening timed out, just try again
pass
def stopper(wait_for_stop=True):
running[0] = False
if wait_for_stop:
listener_thread.join() # block until the background thread is done, which can take around 1 second
def get_data():
while True:
try:
is_last_, audio = data[0]
# Remove from buffer
data.pop(0)
break
except IndexError:
raise NoDataInBuffer
return is_last_, audio
listener_thread = threading.Thread(target=threaded_listen)
listener_thread.daemon = True
listener_thread.start()
return stopper, get_data
def enable_streaming(self):
"""
Adjusts the Recognizer to continuously transcribe a stream of audio input.
Use this before starting a stream.
:example:
.. code-block:: python
recognizer.enable_streaming()
"""
if self.stream:
print("Streaming already enabled...")
else:
self.stream = True
def disable_streaming(self):
"""
Adjusts the Recognizer to stop expecting a stream of audio input.
Use this after cancelling a stream.
:example:
.. code-block:: python
recognizer.disable_streaming()
"""
if self.stream:
self.stream = False
self.stream_thread_stopper(wait_for_stop=False)
else:
self.stream = True
def streaming(self, source):
"""
        Generator for a streaming audio source, e.g. a :meth:`Microphone`.
Spawns a background thread and uses the loaded model to continuously transcribe audio input between
detected silences from the :meth:`Microphone` stream.
**Warning:** Requires that :meth:`Recognizer.enable_streaming` has been called.
:param Microphone source: Source of audio.
:example:
.. code-block:: python
generator = recognizer.streaming(source=m)
# Runs for a long time. Insert your own stop condition.
for i in range(100000):
trans = next(generator)
print(trans)
"""
stopper, data_getter = self.listen_in_background(source)
self.stream_thread_stopper = stopper
is_last = False
is_first_data = False
data_array = []
while self.stream:
# Loop for data (gets all the available data from the stream)
while True:
# If it is the last one in a stream, break and perform recognition no matter what
if is_last:
is_first_data = True
break
# Get all available data
try:
if is_first_data:
is_last, data_array = data_getter()
is_first_data = False
else:
is_last, temp = data_getter()
data_array = np.concatenate((data_array, temp))
                # If this exception is thrown, then we have no available data
except NoDataInBuffer:
# If no data in buffer, we sleep and wait
time.sleep(0.2)
# Since we only break out of data loop, if we need a prediction, the following works
# We only do a prediction if the length of gathered audio is above a threshold
if len(data_array) > self.mininum_required_speaking_seconds * source.sampling_rate:
yield self.recognize(data_array)
is_last = False
data_array = []
def enable_real_time_streaming(self, streaming_model, secondary_model=None, string_parts=True):
"""
Adjusts the Recognizer to continuously transcribe a stream of audio input real-time.
        Real-time audio streaming utilizes a uni-directional model to transcribe an utterance while
        it is being spoken, in contrast to :meth:`Recognizer.streaming`, where the utterance is transcribed after
        a silence has been detected.
Use this before starting a (:meth:`Recognizer.real_time_streaming`) stream.
:param DeepSpeech streaming_model: The DanSpeech model to perform streaming. This model needs to
be uni-directional. This is required for real-time streaming to work.
The two available DanSpeech models are :meth:`pretrained_models.CPUStreamingRNN` and
:meth:`pretrained_models.GPUStreamingRNN` but you may use your own custom streaming model as well.
:param DeepSpeech secondary_model: A valid DanSpeech model (``danspeech.deepspeech.model.DeepSpeech``).
The secondary model transcribes the output after a silence is detected. This is useful since the performance
of uni-directional models is very poor compared to bi-directional models, which require the full utterance.
See :ref:`pre-trained-models` for more information on available models. This can also be your custom DanSpeech trained model.
:param bool string_parts:
Boolean indicating whether you want the generator (:meth:`Recognizer.real_time_streaming`) to yield
            parts of the string or the whole (current) running transcription. The recommended setting is
            ``string_parts=True``, where you keep track of the accumulated transcription yourself.
:example:
.. code-block:: python
recognizer.enable_real_time_streaming(streaming_model=CPUStreamingRNN())
"""
# Update streaming model from Recognizer and not inside the DanSpeechRecognizer
self.update_model(streaming_model)
self.danspeech_recognizer.enable_streaming(secondary_model, string_parts)
self.stream = True
def disable_real_time_streaming(self, keep_secondary_model_loaded=False):
"""
Adjusts the Recognizer to stop expecting a stream of audio input.
Use this after cancelling a stream.
:param bool keep_secondary_model_loaded: Whether to keep the secondary model in memory or not. Generally,
you do not want to keep it in memory, unless you want to perform :meth:`Recognizer.real_time_streaming`
again after disabling the real-time streaming.
:example:
.. code-block:: python
recognizer.disable_real_time_streaming()
"""
if self.stream:
print("Stopping microphone stream...")
self.stream = False
self.stream_thread_stopper(wait_for_stop=False)
self.danspeech_recognizer.disable_streaming(keep_secondary_model=keep_secondary_model_loaded)
else:
print("No stream is running for the Recognizer")
def real_time_streaming(self, source):
"""
        Generator for a real-time streaming audio source, e.g. a :meth:`Microphone`.
Spawns a background thread and uses the loaded model(s) to continuously transcribe an audio utterance
while it is being spoken.
**Warning:** Requires that :meth:`Recognizer.enable_real_time_streaming` has been called.
:param Microphone source: Source of audio.
:example:
.. code-block:: python
generator = r.real_time_streaming(source=m)
iterating_transcript = ""
print("Speak!")
while True:
is_last, trans = next(generator)
# If the transcription is empty, it means that the energy level required for data
# was passed, but nothing was predicted.
if is_last and trans:
print("Final: " + trans)
iterating_transcript = ""
continue
if trans:
iterating_transcript += trans
print(iterating_transcript)
continue
The generator yields both a boolean (is_last) to indicate whether it is a full utterance
(detected by silences in audio input) and the (current/part) transcription. If the is_last boolean is true,
then it is a full utterance determined by a silence.
**Warning:** This method assumes that you use a model with default spectrogram/audio parameters i.e. 20ms
audio for each stft and 50% overlap.
"""
lookahead_context = self.danspeech_recognizer.model.context
required_spec_frames = (lookahead_context - 1) * 2
samples_pr_10ms = int(source.sampling_rate / 100)
        # The first STFT frame needs two 10 ms windows of samples; every following frame adds one more 10 ms hop because of the 50% overlap
general_sample_requirement = samples_pr_10ms * 2 + (samples_pr_10ms * (required_spec_frames - 1))
# First pass, we need more samples due to padding of initial conv layers
first_sample_requirement = general_sample_requirement + (samples_pr_10ms * 15)
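        # Worked example (assuming, hypothetically, a 16 kHz source and a model lookahead
        # context of 20): samples_pr_10ms = 160, required_spec_frames = (20 - 1) * 2 = 38,
        # general_sample_requirement = 160 * 2 + 160 * 37 = 6240 samples and
        # first_sample_requirement = 6240 + 160 * 15 = 8640 samples.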
data_array = []
is_first_data = True
is_first_pass = True
stopper, data_getter = self.listen_in_background(source)
self.stream_thread_stopper = stopper
is_last = False
output = None
consecutive_fails = 0
data_success = False
# Wait 0.2 seconds before we start processing to let the background thread spawn
time.sleep(0.2)
while self.stream:
# Loop for data (gets all the available data from the stream)
while True:
# If it is the last one in a stream, break and perform recognition no matter what
if is_last:
break
# Get all available data
try:
if is_first_data:
is_last, data_array = data_getter()
is_first_data = False
data_success = True
else:
is_last, temp = data_getter()
data_array = np.concatenate((data_array, temp))
data_success = True
# If this exception is thrown, then we have no available data
except NoDataInBuffer:
# If it is first data and no data in buffer, then do not break but sleep.
# We got some data, now process
if data_success:
data_success = False
consecutive_fails = 0
break
# We did not get data and it was the first try, sleep for 0.4 seconds
if is_first_data:
time.sleep(0.4)
else:
consecutive_fails += 1
# If two fails happens in a row, we sleep for 0.3 seconds
if consecutive_fails == 2:
consecutive_fails = 0
time.sleep(0.3)
# If it is the first pass, then we try to pass it
if is_first_pass:
# If is last and we have not performed first pass, then it should be discarded and we continue
if is_last:
output = None
# Check if we have enough frames for first pass
elif len(data_array) >= first_sample_requirement:
output = self.danspeech_recognizer.streaming_transcribe(data_array,
is_last=False,
is_first=True)
# Now first pass has been performed
is_first_pass = False
# Gather new data buffer
data_array = []
is_first_data = True
else:
# If is last, we do not care about general sample requirement but just pass it through
if is_last:
output = self.danspeech_recognizer.streaming_transcribe(data_array,
is_last=is_last,
is_first=False)
# Gather new data buffer
data_array = []
is_first_data = True
# General case! We need some data.
elif len(data_array) >= general_sample_requirement:
output = self.danspeech_recognizer.streaming_transcribe(data_array,
is_last=is_last,
is_first=False)
# Gather new data buffer
data_array = []
is_first_data = True
# Is last should always generate output!
if is_last and output:
yield is_last, output
elif output:
yield is_last, output
output = None
            # Reset streaming
if is_last:
is_first_pass = True
is_last = False
output = None
def adjust_for_speech(self, source, duration=4):
"""
Adjusts the energy level threshold required for the :meth:`audio.Microphone` to detect
speech in background.
        **Warning:** You need to talk after calling this method! Otherwise, the measured energy level will be too low. If talking
to adjust energy level is not an option, use :meth:`Recognizer.adjust_for_ambient_noise` instead.
Only use if the default energy level does not match your use case.
:param Microphone source: Source of audio.
:param float duration: Maximum duration of adjusting the energy threshold
"""
assert isinstance(source, SpeechSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before adjusting, " \
"see documentation for ``AudioSource``; are you using ``source``" \
" outside of a ``with`` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.chunk + 0.0) / source.sampling_rate
elapsed_time = 0
energy_levels = []
# adjust energy threshold until a phrase starts
while True:
elapsed_time += seconds_per_buffer
if elapsed_time > duration:
break
buffer = source.stream.read(source.chunk)
energy = audioop.rms(buffer, source.sampling_width) # energy of the audio signal
energy_levels.append(energy)
energy_average = sum(energy_levels) / len(energy_levels)
        # Subtract some extra energy, since we take the average
if energy_average > 80:
self.energy_threshold = energy_average - 80
else:
self.energy_threshold = energy_average
def adjust_for_ambient_noise(self, source, duration=2):
"""
Source: https://github.com/Uberi/speech_recognition/blob/master/speech_recognition/__init__.py
Modified for DanSpeech
Adjusts the energy level threshold required for the :meth:`audio.Microphone` to detect
speech in background. It is based on the energy level in the background.
**Warning:** Do not talk while adjusting energy threshold with this method. This method generally
sets the energy level very low. We recommend using :meth:`Recognizer.adjust_for_speech` instead.
Only use if the default energy level does not match your use case.
:param Microphone source: Source of audio.
:param float duration: Maximum duration of adjusting the energy threshold
"""
assert isinstance(source, SpeechSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before adjusting, " \
"see documentation for ``AudioSource``; are you using " \
"``source`` outside of a ``with`` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.chunk + 0.0) / source.sampling_rate
elapsed_time = 0
# adjust energy threshold until a phrase starts
while True:
elapsed_time += seconds_per_buffer
if elapsed_time > duration:
break
buffer = source.stream.read(source.chunk)
energy = audioop.rms(buffer, source.sampling_width) # energy of the audio signal
# dynamically adjust the energy threshold using asymmetric weighted average
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
def update_stream_parameters(self, energy_threshold=None, pause_threshold=None,
phrase_threshold=None, non_speaing_duration=None):
"""
Updates parameters for stream of audio. Only use if the default streaming from
your microphone is working poorly.
:param float energy_threshold: Minimum audio energy required for the stream to start detecting an utterance.
:param float pause_threshold: Seconds of non-speaking audio before a phrase is considered complete.
:param float phrase_threshold: Minimum seconds of speaking audio before we consider the speaking audio a phrase.
:param float non_speaing_duration: Seconds of non-speaking audio to keep on both sides of the recording.
"""
if energy_threshold:
self.energy_threshold = energy_threshold
if pause_threshold:
self.pause_threshold = pause_threshold
if phrase_threshold:
self.phrase_threshold = phrase_threshold
if non_speaing_duration:
self.non_speaking_duration = non_speaing_duration
|
dpp-nfc.py
|
#!/usr/bin/python3
#
# Example nfcpy to wpa_supplicant wrapper for DPP NFC operations
# Copyright (c) 2012-2013, Jouni Malinen <j@w1.fi>
# Copyright (c) 2019-2020, The Linux Foundation
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import os
import struct
import sys
import time
import threading
import argparse
import nfc
import ndef
import logging
scriptsdir = os.path.dirname(os.path.realpath(sys.modules[__name__].__file__))
sys.path.append(os.path.join(scriptsdir, '..', '..', 'wpaspy'))
import wpaspy
wpas_ctrl = '/var/run/wpa_supplicant'
ifname = None
init_on_touch = False
in_raw_mode = False
prev_tcgetattr = 0
no_input = False
srv = None
continue_loop = True
terminate_now = False
summary_file = None
success_file = None
my_crn_ready = False
my_crn = None
peer_crn = None
hs_sent = False
netrole = None
operation_success = False
mutex = threading.Lock()
no_alt_proposal = False
C_NORMAL = '\033[0m'
C_RED = '\033[91m'
C_GREEN = '\033[92m'
C_BLUE = '\033[94m'
C_MAGENTA = '\033[95m'
C_CYAN = '\033[96m'
def summary(txt, color=None):
with mutex:
if color:
print(color + txt + C_NORMAL)
else:
print(txt)
if summary_file:
with open(summary_file, 'a') as f:
f.write(txt + "\n")
def success_report(txt):
summary(txt)
if success_file:
with open(success_file, 'a') as f:
f.write(txt + "\n")
def wpas_connect():
ifaces = []
if os.path.isdir(wpas_ctrl):
try:
ifaces = [os.path.join(wpas_ctrl, i) for i in os.listdir(wpas_ctrl)]
except OSError as error:
summary("Could not find wpa_supplicant: %s", str(error))
return None
if len(ifaces) < 1:
summary("No wpa_supplicant control interface found")
return None
for ctrl in ifaces:
if ifname:
if ifname not in ctrl:
continue
try:
summary("Trying to use control interface " + ctrl)
wpas = wpaspy.Ctrl(ctrl)
return wpas
except Exception as e:
pass
return None
def dpp_nfc_uri_process(uri):
wpas = wpas_connect()
if wpas is None:
return False
peer_id = wpas.request("DPP_NFC_URI " + uri)
if "FAIL" in peer_id:
summary("Could not parse DPP URI from NFC URI record", color=C_RED)
return False
peer_id = int(peer_id)
summary("peer_id=%d for URI from NFC Tag: %s" % (peer_id, uri))
cmd = "DPP_AUTH_INIT peer=%d" % peer_id
global enrollee_only, configurator_only, config_params
if enrollee_only:
cmd += " role=enrollee"
elif configurator_only:
cmd += " role=configurator"
if config_params:
cmd += " " + config_params
summary("Initiate DPP authentication: " + cmd)
res = wpas.request(cmd)
if "OK" not in res:
summary("Failed to initiate DPP Authentication", color=C_RED)
return False
summary("DPP Authentication initiated")
return True
def dpp_hs_tag_read(record):
wpas = wpas_connect()
if wpas is None:
return False
summary(record)
if len(record.data) < 5:
summary("Too short DPP HS", color=C_RED)
return False
if record.data[0] != 0:
summary("Unexpected URI Identifier Code", color=C_RED)
return False
uribuf = record.data[1:]
try:
uri = uribuf.decode()
except:
summary("Invalid URI payload", color=C_RED)
return False
summary("URI: " + uri)
if not uri.startswith("DPP:"):
summary("Not a DPP URI", color=C_RED)
return False
return dpp_nfc_uri_process(uri)
def get_status(wpas, extra=None):
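    # Parse the wpa_supplicant STATUS (or STATUS-<extra>) reply, one NAME=value pair
    # per line, into a dict; malformed lines are logged and skipped.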
if extra:
extra = "-" + extra
else:
extra = ""
res = wpas.request("STATUS" + extra)
lines = res.splitlines()
vals = dict()
for l in lines:
try:
[name, value] = l.split('=', 1)
except ValueError:
summary("Ignore unexpected status line: %s" % l)
continue
vals[name] = value
return vals
def get_status_field(wpas, field, extra=None):
vals = get_status(wpas, extra)
if field in vals:
return vals[field]
return None
def own_addr(wpas):
addr = get_status_field(wpas, "address")
if addr is None:
addr = get_status_field(wpas, "bssid[0]")
return addr
def dpp_bootstrap_gen(wpas, type="qrcode", chan=None, mac=None, info=None,
curve=None, key=None):
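    # Builds and issues a DPP_BOOTSTRAP_GEN control command from the given arguments,
    # e.g. "DPP_BOOTSTRAP_GEN type=nfc-uri chan=81/1 mac=020000000000" (illustrative
    # values), and returns the identifier of the new bootstrapping entry.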
cmd = "DPP_BOOTSTRAP_GEN type=" + type
if chan:
cmd += " chan=" + chan
if mac:
if mac is True:
mac = own_addr(wpas)
if mac is None:
summary("Could not determine local MAC address for bootstrap info")
else:
cmd += " mac=" + mac.replace(':', '')
if info:
cmd += " info=" + info
if curve:
cmd += " curve=" + curve
if key:
cmd += " key=" + key
res = wpas.request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
return int(res)
def wpas_get_nfc_uri(start_listen=True, pick_channel=False, chan_override=None):
listen_freq = 2412
wpas = wpas_connect()
if wpas is None:
return None
global own_id, chanlist
if chan_override:
chan = chan_override
else:
chan = chanlist
if chan is None and get_status_field(wpas, "bssid[0]"):
freq = get_status_field(wpas, "freq")
if freq:
freq = int(freq)
if freq >= 2412 and freq <= 2462:
chan = "81/%d" % ((freq - 2407) / 5)
summary("Use current AP operating channel (%d MHz) as the URI channel list (%s)" % (freq, chan))
listen_freq = freq
if chan is None and pick_channel:
chan = "81/6"
summary("Use channel 2437 MHz since no other preference provided")
listen_freq = 2437
own_id = dpp_bootstrap_gen(wpas, type="nfc-uri", chan=chan, mac=True)
res = wpas.request("DPP_BOOTSTRAP_GET_URI %d" % own_id).rstrip()
if "FAIL" in res:
return None
if start_listen:
cmd = "DPP_LISTEN %d" % listen_freq
global netrole
if netrole:
cmd += " netrole=" + netrole
res2 = wpas.request(cmd)
if "OK" not in res2:
raise Exception("Failed to start listen operation (%s)" % cmd)
return res
def wpas_report_handover_req(uri):
wpas = wpas_connect()
if wpas is None:
return None
global own_id
cmd = "DPP_NFC_HANDOVER_REQ own=%d uri=%s" % (own_id, uri)
return wpas.request(cmd)
def wpas_report_handover_sel(uri):
wpas = wpas_connect()
if wpas is None:
return None
global own_id
cmd = "DPP_NFC_HANDOVER_SEL own=%d uri=%s" % (own_id, uri)
return wpas.request(cmd)
def dpp_handover_client(llc, alt=False):
chan_override = None
global alt_proposal_used
if alt:
global altchanlist
chan_override = altchanlist
alt_proposal_used = True
global test_uri, test_alt_uri
if test_uri:
summary("TEST MODE: Using specified URI (alt=%s)" % str(alt))
uri = test_alt_uri if alt else test_uri
else:
uri = wpas_get_nfc_uri(start_listen=False, chan_override=chan_override)
if uri is None:
summary("Cannot start handover client - no bootstrap URI available",
color=C_RED)
return
uri = ndef.UriRecord(uri)
summary("NFC URI record for DPP: " + str(uri))
carrier = ndef.Record('application/vnd.wfa.dpp', 'A', uri.data)
global test_crn
if test_crn:
prev, = struct.unpack('>H', test_crn)
summary("TEST MODE: Use specified crn %d" % prev)
crn = test_crn
test_crn = struct.pack('>H', prev + 0x10)
else:
crn = os.urandom(2)
hr = ndef.HandoverRequestRecord(version="1.4", crn=crn)
hr.add_alternative_carrier('active', carrier.name)
message = [hr, carrier]
summary("NFC Handover Request message for DPP: " + str(message))
global peer_crn
if peer_crn is not None and not alt:
summary("NFC handover request from peer was already received - do not send own")
return
client = nfc.handover.HandoverClient(llc)
try:
summary("Trying to initiate NFC connection handover")
client.connect()
summary("Connected for handover")
except nfc.llcp.ConnectRefused:
summary("Handover connection refused")
client.close()
return
except Exception as e:
summary("Other exception: " + str(e))
client.close()
return
if peer_crn is not None and not alt:
summary("NFC handover request from peer was already received - do not send own")
client.close()
return
summary("Sending handover request")
global my_crn, my_crn_ready, hs_sent
my_crn_ready = True
if not client.send_records(message):
my_crn_ready = False
summary("Failed to send handover request", color=C_RED)
client.close()
return
my_crn, = struct.unpack('>H', crn)
summary("Receiving handover response")
try:
message = client.recv_records(timeout=3.0)
except Exception as e:
# This is fine if we are the handover selector
if hs_sent:
summary("Client receive failed as expected since I'm the handover server: %s" % str(e))
elif alt_proposal_used and not alt:
summary("Client received failed for initial proposal as expected since alternative proposal was also used: %s" % str(e))
else:
summary("Client receive failed: %s" % str(e), color=C_RED)
message = None
if message is None:
if hs_sent:
summary("No response received as expected since I'm the handover server")
elif alt_proposal_used and not alt:
summary("No response received for initial proposal as expected since alternative proposal was also used")
else:
summary("No response received", color=C_RED)
client.close()
return
summary("Received message: " + str(message))
if len(message) < 1 or \
not isinstance(message[0], ndef.HandoverSelectRecord):
summary("Response was not Hs - received: " + message.type)
client.close()
return
summary("Received handover select message")
summary("alternative carriers: " + str(message[0].alternative_carriers))
if alt_proposal_used and not alt:
summary("Ignore received handover select for the initial proposal since alternative proposal was sent")
client.close()
return
dpp_found = False
for carrier in message:
if isinstance(carrier, ndef.HandoverSelectRecord):
continue
summary("Remote carrier type: " + carrier.type)
if carrier.type == "application/vnd.wfa.dpp":
if len(carrier.data) == 0 or carrier.data[0] != 0:
summary("URI Identifier Code 'None' not seen", color=C_RED)
continue
summary("DPP carrier type match - send to wpa_supplicant")
dpp_found = True
uri = carrier.data[1:].decode("utf-8")
summary("DPP URI: " + uri)
if test_uri:
summary("TEST MODE: Fake processing")
break
res = wpas_report_handover_sel(uri)
if res is None or "FAIL" in res:
summary("DPP handover report rejected", color=C_RED)
break
success_report("DPP handover reported successfully (initiator)")
summary("peer_id=" + res)
peer_id = int(res)
wpas = wpas_connect()
if wpas is None:
break
global enrollee_only
global config_params
if enrollee_only:
extra = " role=enrollee"
elif config_params:
extra = " role=configurator " + config_params
else:
# TODO: Single Configurator instance
res = wpas.request("DPP_CONFIGURATOR_ADD")
if "FAIL" in res:
summary("Failed to initiate Configurator", color=C_RED)
break
conf_id = int(res)
extra = " conf=sta-dpp configurator=%d" % conf_id
global own_id
summary("Initiate DPP authentication")
cmd = "DPP_AUTH_INIT peer=%d own=%d" % (peer_id, own_id)
cmd += extra
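            # Illustrative only: the final command ends up looking like
            # "DPP_AUTH_INIT peer=3 own=1 conf=sta-dpp configurator=1"
            # (the peer/own/configurator identifiers depend on runtime state)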
res = wpas.request(cmd)
if "FAIL" in res:
summary("Failed to initiate DPP authentication", color=C_RED)
break
global no_alt_proposal
if not dpp_found and no_alt_proposal:
summary("DPP carrier not seen in response - do not allow alternative proposal anymore")
elif not dpp_found:
summary("DPP carrier not seen in response - allow peer to initiate a new handover with different parameters")
my_crn_ready = False
my_crn = None
peer_crn = None
hs_sent = False
client.close()
summary("Returning from dpp_handover_client")
return
summary("Remove peer")
client.close()
summary("Done with handover")
global only_one
if only_one:
print("only_one -> stop loop")
global continue_loop
continue_loop = False
global no_wait
if no_wait:
print("Trying to exit..")
global terminate_now
terminate_now = True
summary("Returning from dpp_handover_client")
class HandoverServer(nfc.handover.HandoverServer):
def __init__(self, llc):
super(HandoverServer, self).__init__(llc)
self.sent_carrier = None
self.ho_server_processing = False
self.success = False
self.try_own = False
self.llc = llc
def process_handover_request_message(self, records):
self.ho_server_processing = True
global in_raw_mode
was_in_raw_mode = in_raw_mode
clear_raw_mode()
if was_in_raw_mode:
print("\n")
summary("HandoverServer - request received: " + str(records))
global my_crn, peer_crn, my_crn_ready
for carrier in records:
if not isinstance(carrier, ndef.HandoverRequestRecord):
continue
if carrier.collision_resolution_number:
peer_crn = carrier.collision_resolution_number
summary("peer_crn: %d" % peer_crn)
if my_crn is None and my_crn_ready:
summary("Still trying to send own handover request - wait a moment to see if that succeeds before checking crn values")
for i in range(10):
if my_crn is not None:
break
time.sleep(0.01)
if my_crn is not None:
summary("my_crn: %d" % my_crn)
if my_crn is not None and peer_crn is not None:
if my_crn == peer_crn:
summary("Same crn used - automatic collision resolution failed")
# TODO: Should generate a new Handover Request message
return ''
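            # Collision resolution rule: if the least significant bits of the two
            # random numbers match, the larger value wins the Handover Selector role;
            # if the bits differ, the smaller value wins.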
if ((my_crn & 1) == (peer_crn & 1) and my_crn > peer_crn) or \
((my_crn & 1) != (peer_crn & 1) and my_crn < peer_crn):
summary("I'm the Handover Selector Device")
pass
else:
summary("Peer is the Handover Selector device")
summary("Ignore the received request.")
return ''
hs = ndef.HandoverSelectRecord('1.4')
sel = [hs]
found = False
for carrier in records:
if isinstance(carrier, ndef.HandoverRequestRecord):
continue
summary("Remote carrier type: " + carrier.type)
if carrier.type == "application/vnd.wfa.dpp":
summary("DPP carrier type match - add DPP carrier record")
if len(carrier.data) == 0 or carrier.data[0] != 0:
summary("URI Identifier Code 'None' not seen", color=C_RED)
continue
uri = carrier.data[1:].decode("utf-8")
summary("Received DPP URI: " + uri)
global test_uri, test_alt_uri
if test_uri:
summary("TEST MODE: Using specified URI")
data = test_sel_uri if test_sel_uri else test_uri
else:
data = wpas_get_nfc_uri(start_listen=False,
pick_channel=True)
summary("Own URI (pre-processing): %s" % data)
if test_uri:
summary("TEST MODE: Fake processing")
res = "OK"
else:
res = wpas_report_handover_req(uri)
if res is None or "FAIL" in res:
summary("DPP handover request processing failed",
color=C_RED)
global altchanlist
if altchanlist:
data = wpas_get_nfc_uri(start_listen=False,
chan_override=altchanlist)
summary("Own URI (try another channel list): %s" % data)
continue
if test_alt_uri:
summary("TEST MODE: Reject initial proposal")
continue
found = True
if not test_uri:
wpas = wpas_connect()
if wpas is None:
continue
global own_id
data = wpas.request("DPP_BOOTSTRAP_GET_URI %d" % own_id).rstrip()
if "FAIL" in data:
continue
summary("Own URI (post-processing): %s" % data)
uri = ndef.UriRecord(data)
summary("Own bootstrapping NFC URI record: " + str(uri))
if not test_uri:
info = wpas.request("DPP_BOOTSTRAP_INFO %d" % own_id)
freq = None
for line in info.splitlines():
if line.startswith("use_freq="):
freq = int(line.split('=')[1])
if freq is None or freq == 0:
summary("No channel negotiated over NFC - use channel 6")
freq = 2437
else:
summary("Negotiated channel: %d MHz" % freq)
if get_status_field(wpas, "bssid[0]"):
summary("Own AP freq: %s MHz" % str(get_status_field(wpas, "freq")))
if get_status_field(wpas, "beacon_set", extra="DRIVER") is None:
summary("Enable beaconing to have radio ready for RX")
wpas.request("DISABLE")
wpas.request("SET start_disabled 0")
wpas.request("ENABLE")
cmd = "DPP_LISTEN %d" % freq
global enrollee_only
global configurator_only
if enrollee_only:
cmd += " role=enrollee"
elif configurator_only:
cmd += " role=configurator"
summary(cmd)
res = wpas.request(cmd)
if "OK" not in res:
summary("Failed to start DPP listen", color=C_RED)
break
carrier = ndef.Record('application/vnd.wfa.dpp', 'A', uri.data)
summary("Own DPP carrier record: " + str(carrier))
hs.add_alternative_carrier('active', carrier.name)
sel = [hs, carrier]
break
global hs_sent, no_alt_proposal
summary("Sending handover select: " + str(sel))
if found:
summary("Handover completed successfully")
self.success = True
hs_sent = True
elif no_alt_proposal:
summary("Do not try alternative proposal anymore - handover failed",
color=C_RED)
hs_sent = True
else:
summary("Try to initiate with alternative parameters")
self.try_own = True
hs_sent = False
no_alt_proposal = True
threading.Thread(target=llcp_worker, args=(self.llc, True)).start()
return sel
def clear_raw_mode():
import sys, tty, termios
global prev_tcgetattr, in_raw_mode
if not in_raw_mode:
return
fd = sys.stdin.fileno()
termios.tcsetattr(fd, termios.TCSADRAIN, prev_tcgetattr)
in_raw_mode = False
def getch():
import sys, tty, termios, select
global prev_tcgetattr, in_raw_mode
fd = sys.stdin.fileno()
prev_tcgetattr = termios.tcgetattr(fd)
ch = None
try:
tty.setraw(fd)
in_raw_mode = True
[i, o, e] = select.select([fd], [], [], 0.05)
if i:
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, prev_tcgetattr)
in_raw_mode = False
return ch
def dpp_tag_read(tag):
success = False
for record in tag.ndef.records:
summary(record)
summary("record type " + record.type)
if record.type == "application/vnd.wfa.dpp":
summary("DPP HS tag - send to wpa_supplicant")
success = dpp_hs_tag_read(record)
break
if isinstance(record, ndef.UriRecord):
summary("URI record: uri=" + record.uri)
summary("URI record: iri=" + record.iri)
if record.iri.startswith("DPP:"):
summary("DPP URI")
if not dpp_nfc_uri_process(record.iri):
break
success = True
else:
summary("Ignore unknown URI")
break
if success:
success_report("Tag read succeeded")
return success
def rdwr_connected_write_tag(tag):
summary("Tag found - writing - " + str(tag))
if not tag.ndef:
summary("Not a formatted NDEF tag", color=C_RED)
return
if not tag.ndef.is_writeable:
summary("Not a writable tag", color=C_RED)
return
global dpp_tag_data
if tag.ndef.capacity < len(dpp_tag_data):
summary("Not enough room for the message")
return
try:
tag.ndef.records = dpp_tag_data
except ValueError as e:
summary("Writing the tag failed: %s" % str(e), color=C_RED)
return
success_report("Tag write succeeded")
summary("Tag writing completed - remove tag", color=C_GREEN)
global only_one, operation_success
operation_success = True
if only_one:
global continue_loop
continue_loop = False
global dpp_sel_wait_remove
return dpp_sel_wait_remove
def write_nfc_uri(clf, wait_remove=True):
summary("Write NFC URI record")
data = wpas_get_nfc_uri()
if data is None:
summary("Could not get NFC URI from wpa_supplicant", color=C_RED)
return
global dpp_sel_wait_remove
dpp_sel_wait_remove = wait_remove
summary("URI: %s" % data)
uri = ndef.UriRecord(data)
summary(uri)
summary("Touch an NFC tag to write URI record", color=C_CYAN)
global dpp_tag_data
dpp_tag_data = [uri]
clf.connect(rdwr={'on-connect': rdwr_connected_write_tag})
def write_nfc_hs(clf, wait_remove=True):
summary("Write NFC Handover Select record on a tag")
data = wpas_get_nfc_uri()
if data is None:
summary("Could not get NFC URI from wpa_supplicant", color=C_RED)
return
global dpp_sel_wait_remove
dpp_sel_wait_remove = wait_remove
summary("URI: %s" % data)
uri = ndef.UriRecord(data)
summary(uri)
carrier = ndef.Record('application/vnd.wfa.dpp', 'A', uri.data)
hs = ndef.HandoverSelectRecord('1.4')
hs.add_alternative_carrier('active', carrier.name)
summary(hs)
summary(carrier)
summary("Touch an NFC tag to write HS record", color=C_CYAN)
global dpp_tag_data
dpp_tag_data = [hs, carrier]
summary(dpp_tag_data)
clf.connect(rdwr={'on-connect': rdwr_connected_write_tag})
def rdwr_connected(tag):
global only_one, no_wait
summary("Tag connected: " + str(tag))
if tag.ndef:
summary("NDEF tag: " + tag.type)
summary(tag.ndef.records)
success = dpp_tag_read(tag)
if only_one and success:
global continue_loop
continue_loop = False
else:
summary("Not an NDEF tag - remove tag", color=C_RED)
return True
return not no_wait
def llcp_worker(llc, try_alt):
print("Start of llcp_worker()")
if try_alt:
summary("Starting handover client (try_alt)")
dpp_handover_client(llc, alt=True)
summary("Exiting llcp_worker thread (try_alt)")
return
global init_on_touch
if init_on_touch:
summary("Starting handover client (init_on_touch)")
dpp_handover_client(llc)
summary("Exiting llcp_worker thread (init_on_touch)")
return
global no_input
if no_input:
summary("Wait for handover to complete")
else:
print("Wait for handover to complete - press 'i' to initiate")
global srv
global wait_connection
while not wait_connection and srv.sent_carrier is None:
if srv.try_own:
srv.try_own = False
summary("Try to initiate another handover with own parameters")
global peer_crn, my_crn, my_crn_ready, hs_sent
my_crn_ready = False
my_crn = None
peer_crn = None
hs_sent = False
dpp_handover_client(llc, alt=True)
summary("Exiting llcp_worker thread (retry with own parameters)")
return
if srv.ho_server_processing:
time.sleep(0.025)
elif no_input:
time.sleep(0.5)
else:
res = getch()
if res != 'i':
continue
clear_raw_mode()
summary("Starting handover client")
dpp_handover_client(llc)
summary("Exiting llcp_worker thread (manual init)")
return
global in_raw_mode
was_in_raw_mode = in_raw_mode
clear_raw_mode()
if was_in_raw_mode:
print("\r")
summary("Exiting llcp_worker thread")
def llcp_startup(llc):
summary("Start LLCP server")
global srv
srv = HandoverServer(llc)
return llc
def llcp_connected(llc):
summary("P2P LLCP connected")
global wait_connection, my_crn, peer_crn, my_crn_ready, hs_sent
global no_alt_proposal, alt_proposal_used
wait_connection = False
my_crn_ready = False
my_crn = None
peer_crn = None
hs_sent = False
no_alt_proposal = False
alt_proposal_used = False
global srv
srv.start()
if init_on_touch or not no_input:
threading.Thread(target=llcp_worker, args=(llc, False)).start()
return True
def llcp_release(llc):
summary("LLCP release")
return True
def terminate_loop():
global terminate_now
return terminate_now
def main():
clf = nfc.ContactlessFrontend()
parser = argparse.ArgumentParser(description='nfcpy to wpa_supplicant integration for DPP NFC operations')
parser.add_argument('-d', const=logging.DEBUG, default=logging.INFO,
action='store_const', dest='loglevel',
help='verbose debug output')
parser.add_argument('-q', const=logging.WARNING, action='store_const',
dest='loglevel', help='be quiet')
parser.add_argument('--only-one', '-1', action='store_true',
help='run only one operation and exit')
parser.add_argument('--init-on-touch', '-I', action='store_true',
help='initiate handover on touch')
parser.add_argument('--no-wait', action='store_true',
help='do not wait for tag to be removed before exiting')
parser.add_argument('--ifname', '-i',
help='network interface name')
parser.add_argument('--no-input', '-a', action='store_true',
help='do not use stdout input to initiate handover')
parser.add_argument('--tag-read-only', '-t', action='store_true',
help='tag read only (do not allow connection handover)')
parser.add_argument('--handover-only', action='store_true',
help='connection handover only (do not allow tag read)')
parser.add_argument('--enrollee', action='store_true',
help='run as Enrollee-only')
parser.add_argument('--configurator', action='store_true',
help='run as Configurator-only')
parser.add_argument('--config-params', default='',
help='configurator parameters')
parser.add_argument('--ctrl', default='/var/run/wpa_supplicant',
help='wpa_supplicant/hostapd control interface')
parser.add_argument('--summary',
help='summary file for writing status updates')
parser.add_argument('--success',
help='success file for writing success update')
parser.add_argument('--device', default='usb', help='NFC device to open')
parser.add_argument('--chan', default=None, help='channel list')
parser.add_argument('--altchan', default=None, help='alternative channel list')
parser.add_argument('--netrole', default=None, help='netrole for Enrollee')
parser.add_argument('--test-uri', default=None,
help='test mode: initial URI')
parser.add_argument('--test-alt-uri', default=None,
help='test mode: alternative URI')
parser.add_argument('--test-sel-uri', default=None,
help='test mode: handover select URI')
parser.add_argument('--test-crn', default=None,
help='test mode: hardcoded crn')
parser.add_argument('command', choices=['write-nfc-uri',
'write-nfc-hs'],
nargs='?')
args = parser.parse_args()
summary(args)
global only_one
only_one = args.only_one
global no_wait
no_wait = args.no_wait
global chanlist, altchanlist, netrole, test_uri, test_alt_uri, test_sel_uri
global test_crn
chanlist = args.chan
altchanlist = args.altchan
netrole = args.netrole
test_uri = args.test_uri
test_alt_uri = args.test_alt_uri
test_sel_uri = args.test_sel_uri
if args.test_crn:
test_crn = struct.pack('>H', int(args.test_crn))
else:
test_crn = None
logging.basicConfig(level=args.loglevel)
global init_on_touch
init_on_touch = args.init_on_touch
global enrollee_only
enrollee_only = args.enrollee
global configurator_only
configurator_only = args.configurator
global config_params
config_params = args.config_params
if args.ifname:
global ifname
ifname = args.ifname
summary("Selected ifname " + ifname)
if args.ctrl:
global wpas_ctrl
wpas_ctrl = args.ctrl
if args.summary:
global summary_file
summary_file = args.summary
if args.success:
global success_file
success_file = args.success
if args.no_input:
global no_input
no_input = True
global wait_connection
try:
if not clf.open(args.device):
summary("Could not open connection with an NFC device", color=C_RED)
raise SystemExit(1)
if args.command == "write-nfc-uri":
write_nfc_uri(clf, wait_remove=not args.no_wait)
if not operation_success:
raise SystemExit(1)
raise SystemExit
if args.command == "write-nfc-hs":
write_nfc_hs(clf, wait_remove=not args.no_wait)
if not operation_success:
raise SystemExit(1)
raise SystemExit
global continue_loop
while continue_loop:
global in_raw_mode
was_in_raw_mode = in_raw_mode
clear_raw_mode()
if was_in_raw_mode:
print("\r")
if args.handover_only:
summary("Waiting a peer to be touched", color=C_MAGENTA)
elif args.tag_read_only:
summary("Waiting for a tag to be touched", color=C_BLUE)
else:
summary("Waiting for a tag or peer to be touched",
color=C_GREEN)
wait_connection = True
try:
if args.tag_read_only:
if not clf.connect(rdwr={'on-connect': rdwr_connected}):
break
elif args.handover_only:
if not clf.connect(llcp={'on-startup': llcp_startup,
'on-connect': llcp_connected,
'on-release': llcp_release},
terminate=terminate_loop):
break
else:
if not clf.connect(rdwr={'on-connect': rdwr_connected},
llcp={'on-startup': llcp_startup,
'on-connect': llcp_connected,
'on-release': llcp_release},
terminate=terminate_loop):
break
except Exception as e:
summary("clf.connect failed: " + str(e))
break
global srv
if only_one and srv and srv.success:
raise SystemExit
except KeyboardInterrupt:
raise SystemExit
finally:
clf.close()
raise SystemExit
if __name__ == '__main__':
main()
test_logging.py
# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok
from test import support
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
# The following imports are needed only for tests which
# require threading
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
except ImportError:
threading = None
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
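    # The pattern groups capture the logger name, the level name, and the
    # auto-incrementing message number produced by next_message().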
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
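        # logging.Filter("spam.eggs") passes records logged through "spam.eggs"
        # and its descendants (e.g. "spam.eggs.fish"), but not siblings such as
        # "spam.bakedbeans".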
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
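#
# For example (illustrative only), an application with its own small verbosity
# scale could translate to these levels with a plain mapping dictionary:
#
#     APP_LEVEL_MAP = {0: SILENT, 1: VERBOSE, 2: BORING}
#     logger.log(APP_LEVEL_MAP[app_verbosity], "message")
#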
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
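                # With delay=True the stream is not opened until the first emit,
                # so dev/ino keep their initial -1 values at this point.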
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
if threading:
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
try:
asyncore.loop(poll_interval, map=self._map)
except OSError:
                # On FreeBSD 8, closing the server repeatedly
# raises this error. We swallow it if the
# server has been closed.
if self.connected or self.accepting:
raise
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
self._thread.join(timeout)
self._thread = None
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
self._thread.join(timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
        :param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
        :param bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPHandlerTest(BaseTest):
TIMEOUT = 8.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT) # 14314: don't wait forever
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
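        # The MemoryHandler below buffers up to 10 records and flushes them to the
        # root stream handler once a record of WARNING or higher is seen (or the
        # buffer fills up).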
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
    # config2 has a subtle configuration error that should be reported ('sys.stdout' misspelled as 'sys.stbout')
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
if threading:
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
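        # SocketHandler frames each record as a 4-byte big-endian length prefix
        # followed by a pickled dict of the LogRecord attributes; read that
        # framing back here.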
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls(('localhost', server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
# The log message sent to the SysLogHandler is properly received.
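        # '<11>' is the syslog priority: facility LOG_USER (1) * 8 + severity
        # LOG_ERR (3). The message is UTF-8 encoded and, by default
        # (append_nul=True), NUL-terminated.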
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
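        # Run the check twice: once over plain HTTP and once over HTTPS using
        # the bundled localhost certificate; if the ssl module is unavailable,
        # the second pass silently falls back to plain HTTP.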
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
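        # (The logging manager keeps a reference to every logger it creates,
        # so getLogger("foo") hands back the same, still-configured object.)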
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # A FileHandler opened with an explicit encoding should write
        # non-ASCII message text so that it can be read back unchanged.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
        # The non-ASCII data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
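        # The StreamHandler writes str to the cp1251 StreamWriter, which
        # encodes it, so the captured bytes below are the CP-1251 encoding of
        # the message plus the handler's trailing newline.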
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
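            # captureWarnings(True) redirects warnings.showwarning() output to
            # the 'py.warnings' logger instead of writing it to stderr.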
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
#See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
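    # 'ext://sys.stdout' tells dictConfig to resolve an external object by
    # import path, so the handler is wired to the real sys.stdout stream.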
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
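    # config9a and config9b use 'incremental': True, which updates only the
    # levels of existing handlers and loggers; no new handlers, formatters or
    # filters are created.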
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
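    # 'cfg://...' values are resolved against other entries of this same
    # dictionary, so 'formatters' and 'handlers' are filled in indirectly.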
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
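    # Keys under the special '.' key are set as attributes on the constructed
    # handler, so hand1 ends up with .foo == 'bar' and .terminator == '!\n'.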
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
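    # out_of_order checks that a handler's 'target' naming another handler in
    # the same dict is resolved to the actual handler object, even though the
    # entries are processed in no particular order.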
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
            # Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
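        # logging.config.listen() expects each configuration to arrive as a
        # 4-byte big-endian length prefix followed by the config bytes (JSON
        # here), optionally transformed first by the verify callable.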
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
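        # A maxsize <= 0 makes the queue unbounded.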
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
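        # With respect_handler_level=True the listener honours each handler's
        # own level, so only the CRITICAL record should reach this handler.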
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@support.requires_multiprocessing_queue
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
@support.requires_multiprocessing_queue
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
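        # usesTime() should be True only when the format string actually
        # references asctime in the active style.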
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
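        # logging.lastResort is a stderr handler at WARNING level, used when a
        # record reaches a logger hierarchy with no handlers at all.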
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
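        # shutdown() walks the handler list in reverse order of creation,
        # which is why handler2 is flushed and closed first.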
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
if threading:
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
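        # These module-level flags control whether thread, process and
        # multiprocessing information is recorded on new LogRecords; with them
        # turned off, the corresponding attributes should be None.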
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxbytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
#print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
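# The loop above generates one test_compute_rollover_<when> method per rollover
# interval by binding `when` and `exp` as default arguments and attaching the
# closure to TimedRotatingFileHandlerTest with setattr().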
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
proxy.py
|
#!/usr/bin/env python
import sys
import socket
import threading
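# NOTE: this script is written for Python 2 (print statements, implicit
# str/bytes handling); run it with a Python 2 interpreter.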
def server_loop(local_host,local_port,remote_host,remote_port,receive_first):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind((local_host,local_port))
except:
print "[!!] Failed to listen on %s:%d" % (local_host,local_port)
print "[!!] Check for other listening sockets or correct permissions."
sys.exit(0)
print "[*] Listening on %s:%d" % (local_host,local_port)
server.listen(5)
while True:
client_socket, addr = server.accept()
# print out the local connection information
print "[==>] Received incoming connection from %s:%d" % (addr[0], addr[1])
# start a thread to talk to the remote host
proxy_thread = threading.Thread(target=proxy_handler, args=(client_socket,remote_host,remote_port,receive_first))
proxy_thread.start()
def main():
print r'''
_____ ______ _____ _ _ __ __ _____ __ __
|_____] |_____/ | | \___/ \_/ |_____] \_/
| | \_ |_____| _/ \_ | . | | '''
# no fancy command-line parsing here
if len(sys.argv[1:]) != 5:
print "Usage: ./proxy.py [localhost] [localport] [remotehost] [remoteport] [receive_first]"
print "Example: ./proxy.py 127.0.0.1 9000 10.12.132.1 9000 True"
sys.exit(0)
# setup local listening parameters
local_host = sys.argv[1]
local_port = int(sys.argv[2])
# setup remote target
remote_host = sys.argv[3]
remote_port = int(sys.argv[4])
# this tells our proxy to connect and receive data
# before sending to the remote host
receive_first = sys.argv[5]
if "True" in receive_first:
receive_first = True
else:
receive_first = False
# now spin up our listening socket
server_loop(local_host,local_port,remote_host,remote_port,receive_first)
#
# Thread handler
#
def proxy_handler(client_socket, remote_host, remote_port, receive_first):
# connect to the remote host
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_socket.connect((remote_host,remote_port))
# receive data from the remote end if necessary
if receive_first:
remote_buffer = receive_from(remote_socket)
hexdump(remote_buffer)
# send it to our response handler
remote_buffer = response_handler(remote_buffer)
# if we have data to send to our local client, send it
if len(remote_buffer):
print "[<==] Sending %d bytes to localhost." % len(remote_buffer)
client_socket.send(remote_buffer)
    # now let's loop and read from local,
# send to remote, send to local
# rinse, wash, repeat
while True:
# read from local host
local_buffer = receive_from(client_socket)
if len(local_buffer):
print "[==>] Received %d bytes from localhost." % len(local_buffer)
hexdump(local_buffer)
# send it to our request handler
local_buffer = request_handler(local_buffer)
# send off the data to the remote host
remote_socket.send(local_buffer)
print "[==>] Sent to remote."
# receive back the response
remote_buffer = receive_from(remote_socket)
if len(remote_buffer):
print "[<==] Received %d bytes from remote." % len(remote_buffer)
hexdump(remote_buffer)
# send to our response handler
remote_buffer = response_handler(remote_buffer)
# send the response to the local socket
client_socket.send(remote_buffer)
print "[<==] Sent to localhost."
# if no more data on either side, close the connections
if not len(local_buffer) or not len(remote_buffer):
client_socket.close()
remote_socket.close()
print "[*] No more data. Closing connections."
break
# this is a pretty-printing hex dump function taken directly from
# the comments here:
# http://code.activestate.com/recipes/142812-hex-dumper/
def hexdump(src, length=16):
result = []
digits = 4 if isinstance(src, unicode) else 2
for i in xrange(0, len(src), length):
s = src[i:i+length]
hexa = b' '.join(["%0*X" % (digits, ord(x)) for x in s])
text = b''.join([x if 0x20 <= ord(x) < 0x7F else b'.' for x in s])
result.append( b"%04X %-*s %s" % (i, length*(digits + 1), hexa, text) )
print b'\n'.join(result)
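# Rough illustration (assumed, not from the original recipe): hexdump("ABC\x00")
# would print one line of offset, padded hex bytes and printable text, roughly:
# 0000 41 42 43 00    ABC.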
def receive_from(connection):
buffer = ""
# We set a 2 second timeout; depending on your
# target, this may need to be adjusted
connection.settimeout(2)
try:
# keep reading into the buffer until
# there's no more data
# or we time out
while True:
data = connection.recv(4096)
if not data:
break
buffer += data
except:
pass
return buffer
# modify any requests destined for the remote host
def request_handler(buffer):
# perform packet modifications
return buffer
# modify any responses destined for the local host
def response_handler(buffer):
# perform packet modifications
return buffer
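# A hypothetical tweak (not part of the original script): request_handler and
# response_handler above are the intended hook points for rewriting or fuzzing
# traffic, e.g.:
#
#   def request_handler(buffer):
#       return buffer.replace("User-Agent:", "User-Agent: intercepted")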
main()
|
crawler.py
|
# -*- coding: utf-8 -*-
"""
Contains the crawling logic.
"""
from __future__ import unicode_literals, absolute_import
import base64
from collections import defaultdict
import logging
import sys
import time
from pylinkvalidator.included.bs4 import BeautifulSoup, UnicodeDammit
import pylinkvalidator.compat as compat
from pylinkvalidator.compat import (
range, HTTPError, get_url_open, unicode,
get_content_type, get_url_request, get_charset)
from pylinkvalidator.models import (
Config, WorkerInit, Response, PageCrawl,
ExceptionStr, Link, SitePage, WorkerInput, TYPE_ATTRIBUTES, HTML_MIME_TYPE,
MODE_THREAD, MODE_PROCESS, MODE_GREEN, WHEN_ALWAYS, UTF8Class,
PageStatus, PageSource, PAGE_QUEUED, PAGE_CRAWLED, VERBOSE_QUIET,
VERBOSE_NORMAL, LazyLogParam, PREFIX_ALL)
from pylinkvalidator.reporter import report
from pylinkvalidator.urlutil import (
get_clean_url_split, get_absolute_url_split,
is_link, is_similar_url_split, is_supported_scheme)
WORK_DONE = '__WORK_DONE__'
def get_logger(propagate=False):
"""Returns a logger."""
root_logger = logging.getLogger()
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
if root_logger.level != logging.CRITICAL:
logger.addHandler(handler)
logger.propagate = propagate
else:
logger.addHandler(compat.NullHandler())
return logger
class SiteCrawler(object):
"""Main crawler/orchestrator"""
def __init__(self, config, logger):
self.config = config
self.start_url_splits = list(config.start_url_splits)
self.workers = []
self.input_queue = self.build_queue(config)
self.output_queue = self.build_queue(config)
self.logger = logger
self.site = Site(self.start_url_splits, config, self.logger)
def build_logger(self):
return self.logger
def crawl(self):
worker_init = WorkerInit(
self.config.worker_config, self.input_queue,
self.output_queue, self.build_logger())
self.workers = self.get_workers(self.config, worker_init)
queue_size = len(self.start_url_splits)
for start_url_split in self.start_url_splits:
self.input_queue.put(
WorkerInput(
start_url_split, True, 0, start_url_split.netloc,
self.config.content_check),
False)
self.start_workers(self.workers, self.input_queue, self.output_queue)
self.start_progress()
while True:
page_crawl = self.output_queue.get()
queue_size -= 1
new_worker_inputs = self.process_page_crawl(page_crawl)
# We only process new pages if we did not exceed configured depth
for worker_input in new_worker_inputs:
queue_size += 1
self.input_queue.put(worker_input, False)
self.progress(page_crawl, len(self.site.pages), queue_size)
if queue_size <= 0:
self.stop_workers(self.workers, self.input_queue,
self.output_queue)
self.stop_progress()
return self.site
def start_progress(self):
if self.config.options.progress:
print("Starting crawl...")
def stop_progress(self):
if self.config.options.progress:
print("Crawling Done...\n")
def progress(self, page_crawl, done_size, queue_size):
if not self.config.options.progress:
return
total = done_size + queue_size
percent = float(done_size) / float(total) * 100.0
url = ""
if page_crawl.final_url_split:
url = page_crawl.final_url_split.geturl()
elif page_crawl.original_url_split:
url = page_crawl.original_url_split.geturl()
status = page_crawl.status
if not status:
status = "error"
print("{0} - {1} ({2} of {3} - {4:.0f}%)".format(
status, url, done_size, total, percent))
def build_queue(self, config):
"""Returns an object implementing the Queue interface."""
raise NotImplementedError()
def get_workers(self, config, worker_init):
"""Returns a sequence of workers of the desired type."""
raise NotImplementedError()
def start_workers(self, workers, input_queue, output_queue):
"""Start the workers."""
raise NotImplementedError()
def stop_workers(self, workers, input_queue, output_queue):
"""Stops the workers."""
for worker in workers:
input_queue.put(WORK_DONE)
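        # One WORK_DONE sentinel is queued per worker; each PageCrawler exits
        # its crawl_page_forever() loop when it dequeues the sentinel.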
def process_page_crawl(self, page_crawl):
"""Returns a sequence of SplitResult to crawl."""
return self.site.add_crawled_page(page_crawl)
class ThreadSiteCrawler(SiteCrawler):
"""Site Crawler with thread workers."""
def build_queue(self, config):
return compat.Queue.Queue()
def get_workers(self, config, worker_init):
from threading import Thread
workers = []
for _ in range(config.worker_size):
workers.append(
Thread(target=crawl_page, kwargs={'worker_init': worker_init}))
return workers
def start_workers(self, workers, input_queue, output_queue):
for worker in workers:
worker.start()
class ProcessSiteCrawler(SiteCrawler):
"""Site Crawler with process workers."""
def __init__(self, *args, **kwargs):
import multiprocessing
self.manager = multiprocessing.Manager()
self.ProcessClass = multiprocessing.Process
super(ProcessSiteCrawler, self).__init__(*args, **kwargs)
def build_logger(self):
"""We do not want to share a logger."""
return None
def build_queue(self, config):
return self.manager.Queue()
def get_workers(self, config, worker_init):
workers = []
for _ in range(config.worker_size):
workers.append(self.ProcessClass(
target=crawl_page, kwargs={'worker_init': worker_init}))
return workers
def start_workers(self, workers, input_queue, output_queue):
for worker in workers:
worker.start()
class GreenSiteCrawler(SiteCrawler):
"""Site Crawler with green thread workers."""
def __init__(self, *args, **kwargs):
from gevent import monkey, queue, Greenlet
# TODO thread=false should be used to remove useless exception
        # But weird behavior sometimes happens when it is not patched...
monkey.patch_all()
self.QueueClass = queue.Queue
self.GreenClass = Greenlet
super(GreenSiteCrawler, self).__init__(*args, **kwargs)
def build_queue(self, config):
return self.QueueClass()
def get_workers(self, config, worker_init):
workers = []
for _ in range(config.worker_size):
workers.append(self.GreenClass(
crawl_page, worker_init=worker_init))
return workers
def start_workers(self, workers, input_queue, output_queue):
for worker in workers:
worker.start()
class PageCrawler(object):
"""Worker that parses a page and extracts links"""
def __init__(self, worker_init):
self.worker_config = worker_init.worker_config
self.input_queue = worker_init.input_queue
self.output_queue = worker_init.output_queue
self.urlopen = get_url_open()
self.request_class = get_url_request()
self.logger = worker_init.logger
if not self.logger:
# Get a new one!
self.logger = get_logger()
# We do this here to allow patching by gevent
import socket
self.timeout_exception = socket.timeout
self.auth_header = None
if self.worker_config.username and self.worker_config.password:
base64string = unicode(
base64.encodestring(
'{0}:{1}'.format(
self.worker_config.username,
self.worker_config.password)
.encode("utf-8")), "utf-8")
self.auth_header = ("Authorization",
"Basic {0}".format(base64string))
def crawl_page_forever(self):
"""Starts page crawling loop for this worker."""
while True:
worker_input = self.input_queue.get()
if worker_input == WORK_DONE:
# No more work! Pfew!
return
else:
page_crawl = self._crawl_page(worker_input)
self.output_queue.put(page_crawl)
def _crawl_page(self, worker_input):
page_crawl = None
erroneous_content = []
missing_content = []
url_split_to_crawl = worker_input.url_split
try:
response = open_url(
self.urlopen, self.request_class,
url_split_to_crawl.geturl(), self.worker_config.timeout,
self.timeout_exception, self.auth_header,
extra_headers=self.worker_config.extra_headers,
logger=self.logger)
if response.exception:
if response.status:
                    # This is an HTTP error. Good.
page_crawl = PageCrawl(
original_url_split=url_split_to_crawl,
final_url_split=None, status=response.status,
is_timeout=False, is_redirect=False, links=[],
exception=None, is_html=False,
depth=worker_input.depth,
response_time=response.response_time,
process_time=None,
site_origin=worker_input.site_origin)
elif response.is_timeout:
# This is a timeout. No need to wrap the exception
page_crawl = PageCrawl(
original_url_split=url_split_to_crawl,
final_url_split=None, status=None,
is_timeout=True, is_redirect=False, links=[],
exception=None, is_html=False,
depth=worker_input.depth,
response_time=response.response_time,
process_time=0,
site_origin=worker_input.site_origin)
else:
# Something bad happened when opening the url
exception = ExceptionStr(
unicode(type(response.exception)),
unicode(response.exception))
page_crawl = PageCrawl(
original_url_split=url_split_to_crawl,
final_url_split=None, status=None,
is_timeout=False, is_redirect=False, links=[],
exception=exception, is_html=False,
depth=worker_input.depth,
response_time=response.response_time,
process_time=0,
site_origin=worker_input.site_origin)
else:
final_url_split = get_clean_url_split(response.final_url)
message = response.content.info()
mime_type = get_content_type(message)
if self.worker_config.prefer_server_encoding:
charset = get_charset(message)
else:
charset = None
links = []
is_html = mime_type == HTML_MIME_TYPE
process_time = None
if is_html and worker_input.should_crawl:
start = time.time()
html_soup = BeautifulSoup(
response.content, self.worker_config.parser,
from_encoding=charset)
links = self.get_links(html_soup, final_url_split)
if self._has_content_to_check(worker_input):
(missing_content, erroneous_content) =\
self.check_content(
unicode(html_soup), html_soup,
url_split_to_crawl,
final_url_split, worker_input.content_check)
process_time = time.time() - start
else:
self.logger.debug(
"Won't crawl %s. MIME Type: %s. Should crawl: %s",
final_url_split, mime_type,
worker_input.should_crawl)
if self._has_content_to_check(worker_input):
text_content = self.get_text_content(
response.content.read(), charset)
(missing_content, erroneous_content) =\
self.check_content(
text_content, None, url_split_to_crawl,
final_url_split, worker_input.content_check)
page_crawl = PageCrawl(
original_url_split=url_split_to_crawl,
final_url_split=final_url_split, status=response.status,
is_timeout=False, is_redirect=response.is_redirect,
links=links, exception=None, is_html=is_html,
depth=worker_input.depth,
response_time=response.response_time,
process_time=process_time,
site_origin=worker_input.site_origin,
missing_content=missing_content,
erroneous_content=erroneous_content)
except Exception as exc:
exception = ExceptionStr(unicode(type(exc)), unicode(exc))
page_crawl = PageCrawl(
original_url_split=url_split_to_crawl,
final_url_split=None, status=None,
is_timeout=False, is_redirect=False, links=[],
exception=exception, is_html=False,
depth=worker_input.depth,
response_time=None,
process_time=None,
site_origin=worker_input.site_origin)
self.logger.exception("Exception occurred while crawling a page.")
return page_crawl
def _has_content_to_check(self, worker_input):
return worker_input.content_check and\
worker_input.content_check.has_something_to_check
def get_text_content(self, binary_blob, charset):
"""Retrieves unicode content from response binary blob.
"""
override_encodings = []
if charset:
override_encodings.append(charset)
return UnicodeDammit(binary_blob, override_encodings).unicode_markup
def check_content(
self, response_content, html_soup, original_url_split,
final_url_split, content_check):
"""Ensures that the specified content is present (or absent).
"""
missing_content = []
erroneous_content = []
if html_soup:
for content, found in self.check_html_content_single(
content_check.html_presence, html_soup, original_url_split,
final_url_split):
if not found:
missing_content.append(content)
if html_soup:
for content, found in self.check_html_content_single(
content_check.html_absence, html_soup, original_url_split,
final_url_split):
if found:
erroneous_content.append(content)
for content, found in self.check_text_content_single(
content_check.text_presence, response_content,
original_url_split, final_url_split):
if not found:
missing_content.append(content)
for content, found in self.check_text_content_single(
content_check.text_absence, response_content,
original_url_split, final_url_split):
if found:
erroneous_content.append(content)
return (missing_content, erroneous_content)
def check_html_content_single(
self, html_to_check, html_soup, original_url_split,
final_url_split):
"""Returns a list of tuple (content, presence) indicating whether an
html tag was present or not in the source.
"""
content = []
for key, html_check_list in html_to_check.items():
if key == PREFIX_ALL or\
is_similar_url_split(key, original_url_split) or\
is_similar_url_split(key, final_url_split):
# we check
for html_check in html_check_list:
kwargs = {}
if html_check.attrs:
kwargs["attrs"] = html_check.attrs
if html_check.content:
# XXX Use text because the included bs4 does not use
# the new string parameter and text is backward
# compatible.
kwargs["text"] = html_check.content
found = html_soup.find(
html_check.tag, **kwargs) is not None
content.append((str(html_check), found))
return content
def check_text_content_single(
self, text_content_to_check, full_text, original_url_split,
final_url_split):
"""Returns a list of tuple (content, presence) indicating whether an
html tag was present or not in the source.
"""
content = []
for key, text_check_list in text_content_to_check.items():
if key == PREFIX_ALL or\
is_similar_url_split(key, original_url_split) or\
is_similar_url_split(key, final_url_split):
# we check
for text_check in text_check_list:
try:
match = text_check.search(full_text)
content.append((text_check.pattern, match is not None))
except AttributeError:
found = text_check in full_text
content.append((text_check, found))
return content
def get_links(self, html_soup, original_url_split):
"""Gets links for desired types (e.g., a, link, img, script)
:param html_soup: The page parsed by BeautifulSoup
:param original_url_split: The URL of the page used to resolve relative
links.
:rtype: A sequence of Link objects
"""
        # The <base> tag, when present, defines the base URL used to
        # resolve relative links on the page.
base_url_split = original_url_split
bases = html_soup.find_all('base')
if bases:
base = bases[0]
if 'href' in base.attrs:
base_url_split = get_clean_url_split(base['href'])
links = []
for element_type in self.worker_config.types:
if element_type not in TYPE_ATTRIBUTES:
raise Exception(
"Unknown element type: {0}".format(element_type))
attribute = TYPE_ATTRIBUTES[element_type]
element_links = html_soup.find_all(element_type)
links.extend(self._get_links(
element_links, attribute, base_url_split, original_url_split))
return links
def _get_links(self, elements, attribute, base_url_split,
original_url_split):
links = []
for element in elements:
if attribute in element.attrs:
url = element[attribute]
if not self.worker_config.strict_mode:
url = url.strip()
if not is_link(url):
continue
abs_url_split = get_absolute_url_split(url, base_url_split)
if not is_supported_scheme(
abs_url_split, self.worker_config.ignore_bad_tel_urls):
continue
link = Link(
type=unicode(element.name), url_split=abs_url_split,
original_url_split=original_url_split,
source_str=unicode(element))
links.append(link)
return links
class Site(UTF8Class):
"""Contains all the visited and visiting pages of a site.
This class is NOT thread-safe and should only be accessed by one thread at
a time!
"""
def __init__(self, start_url_splits, config, logger=None):
self.start_url_splits = start_url_splits
self.pages = {}
"""Map of url:SitePage"""
self.multi_pages = defaultdict(dict)
"""Map of netloc:map(url:SitePage). Only used in multi sites mode."""
self.error_pages = {}
"""Map of url:SitePage with is_ok=False"""
self.multi_error_pages = defaultdict(dict)
"""Map of netloc:map(url:SitePage). Only used in multi sites
mode."""
self.page_statuses = {}
"""Map of url:PageStatus (PAGE_QUEUED, PAGE_CRAWLED)"""
self.config = config
self.logger = logger
for start_url_split in self.start_url_splits:
self.page_statuses[start_url_split] = PageStatus(PAGE_QUEUED, [])
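    # Page lifecycle (as implemented by the methods below): a URL first enters
    # page_statuses as PAGE_QUEUED when discovered, is marked PAGE_CRAWLED in
    # add_crawled_page(), and the resulting SitePage is stored in self.pages
    # (and also in self.error_pages when is_ok is False).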
def collect_multi_sites(self):
"""Collects page results and maps them to their respective domain in
multi_pages and multi_error_pages.
"""
for url, page in self.pages.items():
self.multi_pages[page.site_origin][url] = page
for url, page in self.error_pages.items():
self.multi_error_pages[page.site_origin][url] = page
@property
def is_ok(self):
"""Returns True if there is no error page."""
return len(self.error_pages) == 0
def add_crawled_page(self, page_crawl):
"""Adds a crawled page. Returns a list of url split to crawl"""
if page_crawl.original_url_split not in self.page_statuses:
self.logger.warning("Original URL not seen before!")
return []
status = self.page_statuses[page_crawl.original_url_split]
# Mark it as crawled
self.page_statuses[page_crawl.original_url_split] = PageStatus(
PAGE_CRAWLED, None)
if page_crawl.original_url_split in self.pages:
self.logger.warning(
"Original URL already crawled! Concurrency issue!")
return []
final_url_split = page_crawl.final_url_split
if not final_url_split:
# Happens on 404/500/timeout/error
final_url_split = page_crawl.original_url_split
if final_url_split in self.pages:
# This means that we already processed this final page.
# It's a redirect. Just add a source
site_page = self.pages[final_url_split]
site_page.add_sources(status.sources)
else:
# We never crawled this page before
is_local = self.config.is_local(final_url_split)
site_page = SitePage(
final_url_split, page_crawl.status,
page_crawl.is_timeout, page_crawl.exception,
page_crawl.is_html, is_local,
response_time=page_crawl.response_time,
process_time=page_crawl.process_time,
site_origin=page_crawl.site_origin,
missing_content=page_crawl.missing_content,
erroneous_content=page_crawl.erroneous_content)
site_page.add_sources(status.sources)
self.pages[final_url_split] = site_page
if not site_page.is_ok:
self.error_pages[final_url_split] = site_page
return self.process_links(page_crawl)
def process_links(self, page_crawl):
links_to_process = []
source_url_split = page_crawl.original_url_split
if page_crawl.final_url_split:
source_url_split = page_crawl.final_url_split
for link in page_crawl.links:
url_split = link.url_split
if not self.config.should_download(url_split):
self.logger.debug(
"Won't download %s. Is local? %s",
url_split,
LazyLogParam(lambda: self.config.is_local(url_split)))
continue
page_status = self.page_statuses.get(url_split, None)
page_source = PageSource(source_url_split, link.source_str)
if not page_status:
# We never encountered this url before
self.page_statuses[url_split] = PageStatus(
PAGE_QUEUED, [page_source])
should_crawl = self.config.should_crawl(
url_split, page_crawl.depth)
links_to_process.append(WorkerInput(
url_split, should_crawl, page_crawl.depth + 1,
page_crawl.site_origin, self.config.content_check))
elif page_status.status == PAGE_CRAWLED:
# Already crawled. Add source
if url_split in self.pages:
self.pages[url_split].add_sources([page_source])
else:
# TODO the final url is different. need a way to link it...
pass
elif page_status.status == PAGE_QUEUED:
# Already queued for crawling. Add source.
page_status.sources.append(page_source)
return links_to_process
def get_average_response_time(self):
"""Computes the average response time of pages that returned an HTTP
code (good or bad). Exceptions such as timeout are ignored.
"""
response_time_sum = 0
total = 0
for page in self.pages.values():
if page.response_time is not None:
response_time_sum += page.response_time
total += 1
if total > 0:
return float(response_time_sum) / float(total)
else:
return 0
def get_average_process_time(self):
"""Computes the average process (parse) time of pages that returned an HTTP
code (good or bad). Exceptions are ignored.
"""
process_time_sum = 0
total = 0
for page in self.pages.values():
if page.process_time is not None:
process_time_sum += page.process_time
total += 1
if total > 0:
return float(process_time_sum) / float(total)
else:
return 0
def __unicode__(self):
return "Site for {0}".format(self.start_url_splits)
def crawl_page(worker_init):
"""Safe redirection to the page crawler"""
page_crawler = PageCrawler(worker_init)
page_crawler.crawl_page_forever()
def open_url(open_func, request_class, url, timeout, timeout_exception,
auth_header=None, extra_headers=None, logger=None):
"""Opens a URL and returns a Response object.
All parameters are required to be able to use a patched version of the
Python standard library (i.e., patched by gevent)
    :param open_func: url open function, typically urllib2.urlopen
:param request_class: the request class to use
:param url: the url to open
:param timeout: number of seconds to wait before timing out
:param timeout_exception: the exception thrown by open_func if a timeout
occurs
:param auth_header: authentication header
:param extra_headers: dict of {Header: Value}
:param logger: logger used to log exceptions
:rtype: A Response object
"""
try:
request = request_class(url)
if auth_header:
request.add_header(auth_header[0], auth_header[1])
if extra_headers:
for header, value in extra_headers.items():
request.add_header(header, value)
start = time.time()
output_value = open_func(request, timeout=timeout)
stop = time.time()
final_url = output_value.geturl()
code = output_value.getcode()
response = Response(
content=output_value, status=code, exception=None,
original_url=url, final_url=final_url,
is_redirect=final_url != url, is_timeout=False,
response_time=stop-start)
except HTTPError as http_error:
stop = time.time()
code = http_error.code
response = Response(
content=None, status=code, exception=http_error,
original_url=url, final_url=None, is_redirect=False,
is_timeout=False, response_time=stop-start)
except timeout_exception as t_exception:
response = Response(
content=None, status=None, exception=t_exception,
original_url=url, final_url=None, is_redirect=False,
is_timeout=True, response_time=None)
except Exception as exc:
if logger:
logger.warning("Exception while opening an URL", exc_info=True)
response = Response(
content=None, status=None, exception=exc,
original_url=url, final_url=None, is_redirect=False,
is_timeout=False, response_time=None)
return response
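# A minimal usage sketch (an assumption, not part of the original module): the
# workers obtain open_func/request_class via pylinkvalidator.compat, but the
# function can also be exercised directly, e.g. on Python 3:
#
#   from urllib.request import urlopen, Request
#   import socket
#   resp = open_url(urlopen, Request, "http://example.com/", 5, socket.timeout)
#   print(resp.status, resp.is_redirect, resp.response_time)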
def execute_from_command_line():
"""Runs the crawler and retrieves the configuration from the command
line.
"""
try:
start = time.time()
config = Config()
config.parse_cli_config()
logger = configure_logger(config)
crawler = execute_from_config(config, logger)
stop = time.time()
if not crawler.site.is_ok or config.options.when == WHEN_ALWAYS:
report(crawler.site, config, stop - start, logger)
if not crawler.site.is_ok:
sys.exit(1)
except Exception as e:
print(e)
sys.exit(1)
def configure_logger(config):
"""Configures a logger based on the configuration."""
if config.options.verbose == VERBOSE_QUIET:
logging.basicConfig(level=logging.CRITICAL)
elif config.options.verbose == VERBOSE_NORMAL:
logging.basicConfig(level=logging.WARNING)
else:
logging.basicConfig(level=logging.DEBUG)
logger = get_logger()
return logger
def execute_from_config(config, logger):
"""Executes a crawler given a config and logger."""
if not config.start_urls:
raise Exception("At least one starting URL must be supplied.")
if config.options.allow_insecure_content:
# Ref: https://www.python.org/dev/peps/pep-0476/#opting-out
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
    crawler = None
    if config.options.mode == MODE_THREAD:
crawler = ThreadSiteCrawler(config, logger)
elif config.options.mode == MODE_PROCESS:
crawler = ProcessSiteCrawler(config, logger)
elif config.options.mode == MODE_GREEN:
crawler = GreenSiteCrawler(config, logger)
if not crawler:
raise Exception("Invalid crawling mode supplied.")
crawler.crawl()
if config.options.multi:
crawler.site.collect_multi_sites()
return crawler
|
faucet_mininet_test.py
|
#!/usr/bin/env python
"""Mininet tests for FAUCET.
* must be run as root
* you can run a specific test case only, by adding the class name of the test
case to the command. Eg ./faucet_mininet_test.py FaucetUntaggedIPv4RouteTest
It is strongly recommended to run these tests via Docker, to ensure you have
all dependencies correctly installed. See ../docs/.
"""
# pylint: disable=missing-docstring
# pylint: disable=unused-wildcard-import
import collections
import glob
import inspect
import os
import sys
import getopt
import re
import shutil
import subprocess
import tempfile
import threading
import time
import unittest
import yaml
from concurrencytest import ConcurrentTestSuite, fork_for_tests
from mininet.log import setLogLevel
from mininet.clean import Cleanup
from packaging import version
import faucet_mininet_test_util
# pylint: disable=wildcard-import
from faucet_mininet_test_unit import *
EXTERNAL_DEPENDENCIES = (
('ryu-manager', ['--version'],
'ryu-manager', r'ryu-manager (\d+\.\d+)\n', "4.9"),
('ovs-vsctl', ['--version'], 'Open vSwitch',
r'ovs-vsctl\s+\(Open vSwitch\)\s+(\d+\.\d+)\.\d+\n', "2.3"),
('tcpdump', ['-h'], 'tcpdump',
r'tcpdump\s+version\s+(\d+\.\d+)\.\d+\n', "4.5"),
('nc', [], 'nc from the netcat-openbsd', '', 0),
('vconfig', [], 'the VLAN you are talking about', '', 0),
('2to3', ['--help'], 'Usage: 2to3', '', 0),
('fuser', ['-V'], r'fuser \(PSmisc\)',
r'fuser \(PSmisc\) (\d+\.\d+)\n', "22.0"),
('mn', ['--version'], r'\d+\.\d+.\d+',
r'(\d+\.\d+).\d+', "2.2"),
('exabgp', ['--version'], 'ExaBGP',
r'ExaBGP : (\d+\.\d+).\d+', "3.4"),
('pip', ['show', 'influxdb'], 'influxdb',
r'Version:\s+(\d+\.\d+)\.\d+', "3.0"),
('pylint', ['--version'], 'pylint',
r'pylint (\d+\.\d+).\d+,', "1.6"),
('curl', ['--version'], 'libcurl',
r'curl (\d+\.\d+).\d+', "7.3"),
('ladvd', ['-h'], 'ladvd',
r'ladvd version (\d+\.\d+)\.\d+', "1.1"),
('iperf', ['--version'], 'iperf',
r'iperf version (\d+\.\d+)\.\d+', "2.0"),
('fping', ['-v'], 'fping',
r'fping: Version (\d+\.\d+)', "3.13"),
('rdisc6', ['-V'], 'ndisc6',
r'ndisc6.+tool (\d+\.\d+)', "1.0"),
)
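# Each EXTERNAL_DEPENDENCIES entry is:
# (binary, args used to query its version, string/regex expected in the output,
#  regex that extracts the version number, minimum required version).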
# Must pass with 0 lint errors
FAUCET_LINT_SRCS = glob.glob(
os.path.join(faucet_mininet_test_util.FAUCET_DIR, '*py'))
FAUCET_TEST_LINT_SRCS = glob.glob(
os.path.join(os.path.dirname(__file__), 'faucet_mininet_test*py'))
# Maximum number of parallel tests to run at once
MAX_PARALLEL_TESTS = 6
# see hw_switch_config.yaml for how to bridge in an external hardware switch.
HW_SWITCH_CONFIG_FILE = 'hw_switch_config.yaml'
CONFIG_FILE_DIRS = ['/etc/ryu/faucet', './']
REQUIRED_TEST_PORTS = 4
def import_hw_config():
"""Import configuration for physical switch testing."""
for config_file_dir in CONFIG_FILE_DIRS:
config_file_name = os.path.join(config_file_dir, HW_SWITCH_CONFIG_FILE)
if os.path.isfile(config_file_name):
break
if os.path.isfile(config_file_name):
print('Using config from %s' % config_file_name)
else:
print('Cannot find %s in %s' % (HW_SWITCH_CONFIG_FILE, CONFIG_FILE_DIRS))
sys.exit(-1)
try:
with open(config_file_name, 'r') as config_file:
config = yaml.load(config_file)
except IOError:
print('Could not load YAML config data from %s' % config_file_name)
sys.exit(-1)
if 'hw_switch' in config:
hw_switch = config['hw_switch']
if not isinstance(hw_switch, bool):
            print('hw_switch must be a bool: %s' % hw_switch)
sys.exit(-1)
if not hw_switch:
return None
required_config = {
'dp_ports': (dict,),
'cpn_intf': (str,),
'dpid': (long, int),
'of_port': (int,),
'gauge_of_port': (int,),
}
for required_key, required_key_types in list(required_config.items()):
if required_key not in config:
print('%s must be specified in %s to use HW switch.' % (
required_key, config_file_name))
sys.exit(-1)
required_value = config[required_key]
key_type_ok = False
for key_type in required_key_types:
if isinstance(required_value, key_type):
key_type_ok = True
break
if not key_type_ok:
print('%s (%s) must be %s in %s' % (
required_key, required_value,
required_key_types, config_file_name))
sys.exit(1)
dp_ports = config['dp_ports']
if len(dp_ports) != REQUIRED_TEST_PORTS:
            print('Exactly %u dataplane ports are required, '
                  '%d are provided in %s.' %
                  (REQUIRED_TEST_PORTS, len(dp_ports), config_file_name))
            sys.exit(-1)
return config
else:
return None
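# A hypothetical hw_switch_config.yaml matching the keys validated above
# (placeholder values only, not a tested configuration):
#
#   hw_switch: True
#   dpid: 0x1
#   cpn_intf: eth0
#   of_port: 6636
#   gauge_of_port: 6637
#   dp_ports:
#     1: eth1
#     2: eth2
#     3: eth3
#     4: eth4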
def check_dependencies():
"""Verify dependant libraries/binaries are present with correct versions."""
print('Checking library/binary dependencies')
for (binary, binary_get_version, binary_present_re,
binary_version_re, binary_minversion) in EXTERNAL_DEPENDENCIES:
binary_args = [binary] + binary_get_version
required_binary = 'required binary/library %s' % (
' '.join(binary_args))
try:
proc = subprocess.Popen(
binary_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
proc_out, proc_err = proc.communicate()
binary_output = proc_out
if proc_err is not None:
binary_output += proc_err
except subprocess.CalledProcessError:
# Might have run successfully, need to parse output
pass
except OSError:
print('could not run %s' % required_binary)
return False
present_match = re.search(binary_present_re, binary_output)
if not present_match:
print('%s not present or did not return expected string %s' % (
required_binary, binary_present_re))
return False
if binary_version_re:
version_match = re.search(binary_version_re, binary_output)
if version_match is None:
print('could not get version from %s (%s)' % (
required_binary, binary_output))
return False
try:
binary_version = version_match.group(1)
except ValueError:
print('cannot parse version %s for %s' % (
version_match, required_binary))
return False
if version.parse(binary_version) < version.parse(binary_minversion):
print('%s version %s is less than required version %s' % (
required_binary, binary_version, binary_minversion))
return False
return True
def lint_check():
"""Run pylint on required source files."""
print('Running pylint checks')
for faucet_src in FAUCET_LINT_SRCS: # + FAUCET_TEST_LINT_SRCS:
ret = subprocess.call(
['env',
'PYTHONPATH=%s' % faucet_mininet_test_util.FAUCET_DIR,
'pylint',
'--rcfile=/dev/null',
'-E', faucet_src])
if ret:
print(('pylint of %s returns an error' % faucet_src))
return False
for faucet_src in FAUCET_LINT_SRCS:
output_2to3 = subprocess.check_output(
['2to3', '--nofix=import', faucet_src],
stderr=open(os.devnull, 'wb'))
if output_2to3:
print(('2to3 of %s returns a diff (not python3 compatible)' % faucet_src))
print(output_2to3)
return False
return True
def make_suite(tc_class, hw_config, root_tmpdir, ports_sock):
"""Compose test suite based on test class names."""
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(tc_class)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(tc_class(name, hw_config, root_tmpdir, ports_sock))
return suite
def pipeline_superset_report(root_tmpdir):
ofchannel_logs = glob.glob(
os.path.join(root_tmpdir, '*/ofchannel.log'))
match_re = re.compile(
r'^.+types table: (\d+) match: (.+) instructions: (.+) actions: (.+)')
table_matches = collections.defaultdict(set)
table_instructions = collections.defaultdict(set)
table_actions = collections.defaultdict(set)
for log in ofchannel_logs:
for log_line in open(log).readlines():
match = match_re.match(log_line)
if match:
table, matches, instructions, actions = match.groups()
table = int(table)
table_matches[table].update(eval(matches))
table_instructions[table].update(eval(instructions))
table_actions[table].update(eval(actions))
print('')
for table in sorted(table_matches):
print('table: %u' % table)
print(' matches: %s' % sorted(table_matches[table]))
print(' table_instructions: %s' % sorted(table_instructions[table]))
print(' table_actions: %s' % sorted(table_actions[table]))
def expand_tests(requested_test_classes, excluded_test_classes,
hw_config, root_tmpdir, ports_sock, serial):
total_tests = 0
sanity_tests = unittest.TestSuite()
single_tests = unittest.TestSuite()
parallel_tests = unittest.TestSuite()
for name, obj in inspect.getmembers(sys.modules[__name__]):
if not inspect.isclass(obj):
continue
if requested_test_classes and name not in requested_test_classes:
continue
if excluded_test_classes and name in excluded_test_classes:
continue
if name.endswith('Test') and name.startswith('Faucet'):
# TODO: hardware testing should have a way to configure
# which switch in a string is the hardware switch to test.
if re.search(r'Faucet.*String', name) and hw_config is not None:
print(
'skipping %s as string tests not supported for hardware' % name)
continue
print('adding test %s' % name)
test_suite = make_suite(obj, hw_config, root_tmpdir, ports_sock)
if name.startswith('FaucetSanity'):
sanity_tests.addTest(test_suite)
else:
if serial or name.startswith('FaucetSingle'):
single_tests.addTest(test_suite)
total_tests += 1
else:
parallel_tests.addTest(test_suite)
total_tests += 1
return (total_tests, sanity_tests, single_tests, parallel_tests)
def run_test_suites(sanity_tests, single_tests, parallel_tests):
all_successful = False
sanity_runner = unittest.TextTestRunner(verbosity=255, failfast=True)
sanity_result = sanity_runner.run(sanity_tests)
if sanity_result.wasSuccessful():
print('running %u tests in parallel and %u tests serial' % (
parallel_tests.countTestCases(), single_tests.countTestCases()))
results = []
if parallel_tests.countTestCases():
max_parallel_tests = min(parallel_tests.countTestCases(), MAX_PARALLEL_TESTS)
parallel_runner = unittest.TextTestRunner(verbosity=255)
parallel_suite = ConcurrentTestSuite(
parallel_tests, fork_for_tests(max_parallel_tests))
results.append(parallel_runner.run(parallel_suite))
# TODO: Tests that are serialized generally depend on hardcoded ports.
# Make them use dynamic ports.
if single_tests.countTestCases():
single_runner = unittest.TextTestRunner(verbosity=255)
results.append(single_runner.run(single_tests))
all_successful = True
for result in results:
if not result.wasSuccessful():
all_successful = False
print(result.printErrors())
else:
print('sanity tests failed - test environment not correct')
return all_successful
def start_port_server(root_tmpdir):
ports_sock = os.path.join(root_tmpdir, '.ports-server')
ports_server = threading.Thread(
target=faucet_mininet_test_util.serve_ports, args=(ports_sock,))
ports_server.setDaemon(True)
ports_server.start()
for _ in range(10):
if os.path.exists(ports_sock):
break
time.sleep(1)
if not os.path.exists(ports_sock):
print('ports server did not start (%s not created)' % ports_sock)
sys.exit(-1)
return ports_sock
def run_tests(requested_test_classes,
excluded_test_classes,
keep_logs,
serial,
hw_config):
"""Actually run the test suites, potentially in parallel."""
if hw_config is not None:
print('Testing hardware, forcing test serialization')
serial = True
root_tmpdir = tempfile.mkdtemp(prefix='faucet-tests-')
ports_sock = start_port_server(root_tmpdir)
total_tests, sanity_tests, single_tests, parallel_tests = expand_tests(
requested_test_classes, excluded_test_classes,
hw_config, root_tmpdir, ports_sock, serial)
all_successful = run_test_suites(
sanity_tests, single_tests, parallel_tests)
pipeline_superset_report(root_tmpdir)
os.remove(ports_sock)
if not keep_logs and all_successful:
shutil.rmtree(root_tmpdir)
if not all_successful:
sys.exit(-1)
def parse_args():
"""Parse command line arguments."""
try:
opts, args = getopt.getopt(
sys.argv[1:],
'cknsx:',
['clean', 'nocheck', 'keep_logs', 'serial'])
except getopt.GetoptError as err:
print(str(err))
sys.exit(2)
clean = False
keep_logs = False
nocheck = False
serial = False
excluded_test_classes = []
for opt, arg in opts:
if opt in ('-c', '--clean'):
clean = True
if opt in ('-n', '--nocheck'):
nocheck = True
if opt in ('-k', '--keep_logs'):
keep_logs = True
if opt in ('-s', '--serial'):
serial = True
if opt == '-x':
excluded_test_classes.append(arg)
return (args, clean, keep_logs, nocheck, serial, excluded_test_classes)
def test_main():
"""Test main."""
setLogLevel('info')
args, clean, keep_logs, nocheck, serial, excluded_test_classes = parse_args()
if clean:
print('Cleaning up test interfaces, processes and openvswitch '
'configuration from previous test runs')
Cleanup.cleanup()
sys.exit(0)
if nocheck:
print('Skipping dependencies/lint checks')
else:
if not check_dependencies():
print('dependency check failed. check required library/binary '
'list in header of this script')
sys.exit(-1)
if not lint_check():
print('pylint must pass with no errors')
sys.exit(-1)
hw_config = import_hw_config()
run_tests(args, excluded_test_classes, keep_logs, serial, hw_config)
if __name__ == '__main__':
test_main()
|
test_cli.py
|
#!/usr/bin/python3
"""
(C) 2018,2019 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import subprocess
import sys
import os
import logging
import optparse # pylint: disable=deprecated-module
import time
import shutil
import tempfile
import re
import random
import json
import binascii
import platform
import multiprocessing
from multiprocessing.pool import ThreadPool
# pylint: disable=global-statement,unused-argument
CLI_PATH = None
TESTS_RUN = 0
TESTS_FAILED = 0
def run_socket_tests():
# Some of the socket tests fail on FreeBSD CI, for reasons unknown.
# Connecting to the server port fails. Possibly a local firewall?
return platform.system().lower() != "freebsd"
class TestLogHandler(logging.StreamHandler, object):
def emit(self, record):
# Do the default stuff first
super(TestLogHandler, self).emit(record)
if record.levelno >= logging.ERROR:
global TESTS_FAILED
TESTS_FAILED += 1
def setup_logging(options):
if options.verbose:
log_level = logging.DEBUG
elif options.quiet:
log_level = logging.WARNING
else:
log_level = logging.INFO
lh = TestLogHandler(sys.stdout)
lh.setFormatter(logging.Formatter('%(levelname) 7s: %(message)s'))
logging.getLogger().addHandler(lh)
logging.getLogger().setLevel(log_level)
def random_port_number():
return random.randint(1024, 65535)
def test_cli(cmd, cmd_options, expected_output=None, cmd_input=None, expected_stderr=None, use_drbg=True):
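    # Shared helper for all tests below: runs `botan <cmd> <options>`, normally with a
    # fixed-seed DRBG so output is reproducible, and compares trimmed stdout/stderr
    # against expected_output/expected_stderr when those are provided. cmd_input, if
    # set, is fed to the process on stdin.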
global TESTS_RUN
TESTS_RUN += 1
opt_list = []
if isinstance(cmd_options, str):
opt_list = cmd_options.split(' ')
elif isinstance(cmd_options, list):
opt_list = cmd_options
if use_drbg:
fixed_drbg_seed = "802" * 32
drbg_options = ['--rng-type=drbg', '--drbg-seed=' + fixed_drbg_seed]
else:
drbg_options = []
cmdline = [CLI_PATH, cmd] + drbg_options + opt_list
logging.debug("Executing '%s'", ' '.join([CLI_PATH, cmd] + opt_list))
stdout = None
stderr = None
if cmd_input is None:
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
else:
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate(cmd_input.encode())
stdout = stdout.decode('ascii').strip()
stderr = stderr.decode('ascii').strip()
if stderr:
if expected_stderr is None:
logging.error("Got output on stderr %s (stdout was %s)", stderr, stdout)
else:
if stderr != expected_stderr:
logging.error("Got output on stderr %s which did not match expected value %s", stderr, expected_stderr)
else:
if expected_stderr is not None:
logging.error('Expected output on stderr but got nothing')
if expected_output is not None:
if stdout != expected_output:
logging.error("Got unexpected output running cmd %s %s", cmd, cmd_options)
logging.info("Output lengths %d vs expected %d", len(stdout), len(expected_output))
logging.info("Got %s", stdout)
logging.info("Exp %s", expected_output)
return stdout
def check_for_command(cmd):
cmdline = [CLI_PATH, 'has_command', cmd]
proc = subprocess.Popen(cmdline)
proc.communicate()
return proc.returncode == 0
def cli_config_tests(_tmp_dir):
prefix = test_cli("config", "prefix")
cflags = test_cli("config", "cflags")
ldflags = test_cli("config", "ldflags")
libs = test_cli("config", "libs")
if len(prefix) < 4 or prefix[0] != '/':
logging.error("Bad prefix %s", prefix)
if ("-I%s/include/botan-3" % (prefix)) not in cflags:
logging.error("Bad cflags %s", cflags)
if not ldflags.endswith(("-L%s/lib" % (prefix))):
logging.error("Bad ldflags %s", ldflags)
if "-lbotan-3" not in libs:
logging.error("Bad libs %s", libs)
def cli_help_tests(_tmp_dir):
output = test_cli("help", None, None)
# Maybe test format somehow??
if len(output) < 500:
logging.error("Help output seems very short")
def cli_version_tests(_tmp_dir):
output = test_cli("version", None, None)
version_re = re.compile(r'[0-9]\.[0-9]+\.[0-9](\-[a-z]+[0-9]+)?')
if not version_re.match(output):
logging.error("Unexpected version output %s", output)
output = test_cli("version", ["--full"], None, None)
version_full_re = re.compile(r'Botan [0-9]\.[0-9]+\.[0-9](\-[a-z]+[0-9]+)? \(.* revision .*, distribution .*\)$')
if not version_full_re.match(output):
logging.error("Unexpected version output %s", output)
def cli_is_prime_tests(_tmp_dir):
test_cli("is_prime", "5", "5 is probably prime")
test_cli("is_prime", "9", "9 is composite")
test_cli("is_prime", "548950623407687320763", "548950623407687320763 is probably prime")
def cli_gen_prime_tests(_tmp_dir):
test_cli("gen_prime", "64", "15568813029901363163")
test_cli("gen_prime", "128", "287193909494025008847286845478788766073")
def cli_cycle_counter(_tmp_dir):
output = test_cli("cpu_clock", None, None)
if output.startswith('No CPU cycle counter on this machine'):
return
have_clock_re = re.compile(r'Estimated CPU clock [0-9\.]+ (M|G)Hz')
if have_clock_re.match(output):
return
logging.error('Unexpected output from cpu_clock: %s', output)
def cli_entropy_tests(_tmp_dir):
output = test_cli("entropy", ["all"], None)
status_re = re.compile('Polling [a-z0-9_]+ gathered [0-9]+ bytes in [0-9]+ outputs with estimated entropy [0-9]+')
unavail_re = re.compile('Source [a-z0-9_]+ is unavailable')
comp_re = re.compile('Sample from [a-z0-9_]+ was .* compressed from [0-9]+ bytes to [0-9]+ bytes')
output_re = re.compile(r'[A-F0-9]+(...)?')
status_next = True
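    # Output alternates between a "Polling ..." status line and a hex sample line;
    # "is unavailable" notices and compression notes may also appear and are tolerated.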
for line in output.split('\n'):
if comp_re.match(line):
continue
if status_next:
if status_re.match(line) is not None:
status_next = False
elif unavail_re.match(line) is not None:
pass
else:
logging.error('Unexpected status line %s', line)
status_next = False
else:
if output_re.match(line) is None:
logging.error('Unexpected sample line %s', line)
status_next = True
def cli_factor_tests(_tmp_dir):
test_cli("factor", "97", "97: 97")
test_cli("factor", "9753893489562389", "9753893489562389: 21433 455087644733")
test_cli("factor", "12019502040659149507", "12019502040659149507: 3298628633 3643787579")
def cli_mod_inverse_tests(_tmp_dir):
test_cli("mod_inverse", "97 802", "339")
test_cli("mod_inverse", "98 802", "0")
def cli_base64_tests(_tmp_dir):
test_cli("base64_enc", "-", "YmVlcyE=", "bees!")
test_cli("base64_dec", "-", "bees!", "YmVlcyE=")
def cli_base32_tests(_tmp_dir):
test_cli("base32_enc", "-", "MJSWK4ZB", "bees!")
test_cli("base32_dec", "-", "bees!", "MJSWK4ZB")
def cli_base58_tests(_tmp_dir):
test_cli("base58_enc", "-", "C6sRAr4", "bees!")
test_cli("base58_dec", "-", "bees!", "C6sRAr4")
test_cli("base58_enc", ["--check", "-"], "Cjv15cdjaBc", "F00F")
test_cli("base58_dec", ["--check", "-"], "F00F", "Cjv15cdjaBc")
def cli_hex_tests(_tmp_dir):
test_cli("hex_enc", "-", "6265657321", "bees!")
test_cli("hex_dec", "-", "bees!", "6265657321")
def cli_hash_tests(_tmp_dir):
test_cli("hash", "--algo=SHA-256",
"E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855 -", "")
test_cli("hash", "--algo=SHA-256",
"BA7816BF8F01CFEA414140DE5DAE2223B00361A396177A9CB410FF61F20015AD -", "abc")
test_cli("hash", ["--algo=SHA-256", "--format=base64"],
"ungWv48Bz+pBQUDeXa4iI7ADYaOWF3qctBD/YfIAFa0= -", "abc")
test_cli("hash", ["--algo=SHA-224", "--format=base58", "--no-fsname"],
"MuGc8HkSVyJjfMjPM5UQikPToBTzNucEghcGLe", "abc")
test_cli("hash", ["--algo=SHA-224", "--format=base58check", "--no-fsname"],
"3MmfMqgrhemdVa9bDAGfooukbviWtKMBx2xauL2RsyAe", "abc")
def cli_hmac_tests(tmp_dir):
key_file = os.path.join(tmp_dir, 'hmac.key')
test_cli("rng", ["64", "--output=%s" % (key_file)], "")
test_cli("hmac", ["--no-fsname", "--hash=SHA-384", key_file, key_file],
"E3A8529377030B28A7DBDFC50DDEC8E4ECEFB6EA850D95EB785938CD3E3AFEF9EF8B08AF219C1496633193468AB755CB")
def cli_bcrypt_tests(_tmp_dir):
test_cli("gen_bcrypt", "--work-factor=4 s3kr1t",
"$2a$04$0.8G7o08XYwvBBWA3l0WUujtwoGZgGDzVSN8fNkNqXikcK4A3lHPS")
test_cli("check_bcrypt", "s3kr1t $2a$04$gHX4Qg7pDSJuXiPXnmt8leyb.FFzX1Bv4rXwIj2cPSakJ8zNnhIka",
"Password is valid")
test_cli("check_bcrypt", "santa $2a$04$gHX4Qg7pDSJuXiPXnmt8leyb.FFzX1Bv4rXwIj2cPSakJ8zNnhIka",
"Password is NOT valid")
def cli_argon2_tests(_tmp_dir):
password = "s3kr1t"
expected = "$argon2id$v=19$m=8,t=1,p=1$2A+I9q2+ZayxDDYC5n2YWw$/Lhx+Jbtlpw+Kxpskfv7+AKhBL/5ebalTJkVC1O5+1E"
test_cli("gen_argon2", ['--mem=8', password], expected)
test_cli("gen_argon2", ['--mem=8', '--t=1', password], expected)
test_cli("gen_argon2", ['--mem=8', '--t=1', '--p=1', password], expected)
test_cli("check_argon2", [password, expected], "Password is valid")
test_cli("check_argon2", ["guessing", expected], "Password is NOT valid")
def cli_gen_dl_group_tests(_tmp_dir):
pem = """-----BEGIN X9.42 DH PARAMETERS-----
MIIBJAKBgwTw7LQiLkXJsrgMVQxTPlWaQlYz/raZ+5RtIZe4YluQgRQGPFADLZ/t
TOYzuIzZJFOcdKtEtrVkxZRGSkjZwKFKLUD6fzSjoC2M2EHktK/y5HsvxBxL4tKr
q1ffbyPQi+iBLYTZAXygvxj2vWyrvA+/w4nbt1fStCHTDhWjLWqFpV9nAoGDAKzA
HUu/IRl7OiUtW/dz36gzEJnaYtz4ZtJl0FG8RJiOe02lD8myqW2sVzYqMvKD0LGx
x9fdSKC1G+aZ/NWtqrQjb66Daf7b0ddDx+bfWTWJ2dOtZd8IL2rmQQJm+JogDi9i
huVYFicDNQGzi+nEKAzrZ1L/VxtiSiw/qw0IyOuVtz8CFjgPiPatvmWssQw2AuZ9
mFvAZ/8wal0=
-----END X9.42 DH PARAMETERS-----"""
test_cli("gen_dl_group", ["--pbits=1043", "--qbits=174"], pem)
dsa_grp = """-----BEGIN DSA PARAMETERS-----
MIIBHgKBgQCyP1vosC/axliM2hmJ9EOSdd1zBkuzMP25CYD8PFkRVrPLr1ClSUtn
eXTIsHToJ7d7sRwtidQGW9BrvUEyiAWE06W/wnLPxB3/g2/l/P2EhbNmNHAO7rV7
ZVz/uKR4Xcvzxg9uk5MpT1VsxA8H6VEwzefNF1Rya92rqGgBTNT3/wIVAL8IVgyt
8mRJqYXO3cJePyd2afjFAoGALscsvwAa7e2onFOTWI2CiOM6JKt4ufqKEDxHyRCd
FcNM20MrP33oocYid8wG6tQjXM8zfGpsdzQK9TU1/zt6eE8it63MlwWCIJas0VQg
LbnM8SOnSzf8REdPgGLVMAFnePphQRB+eeP71euIood/Za1fRPgVeiu+cqrfSb3f
ivM=
-----END DSA PARAMETERS-----"""
test_cli("gen_dl_group", ["--type=dsa", "--pbits=1024"], dsa_grp)
def cli_key_tests(tmp_dir):
pem = """-----BEGIN PRIVATE KEY-----
MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQg2A+I9q2+ZayxDDYC5n2Y
W8Bn/zBm4D3mwS5qMwADRDehRANCAATwnDFqsjXL9SD/Rr1Vy4pb79PswXdQNZBN
mlLtJ5JvZ0/p6zP3x+Y9yPIrAR8L/acG5ItSrAKXzzuqQQZMv4aN
-----END PRIVATE KEY-----"""
priv_key = os.path.join(tmp_dir, 'priv.pem')
pub_key = os.path.join(tmp_dir, 'pub.pem')
pub_der_key = os.path.join(tmp_dir, 'pub.der')
enc_pem = os.path.join(tmp_dir, 'priv_enc.pem')
enc_der = os.path.join(tmp_dir, 'priv_enc.der')
ca_cert = os.path.join(tmp_dir, 'ca.crt')
crt_req = os.path.join(tmp_dir, 'crt.req')
user_cert = os.path.join(tmp_dir, 'user.crt')
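    # Walks the whole key lifecycle: keygen, PKCS8 export, fingerprinting, sign/verify,
    # self-signed CA creation, CSR generation and certificate signing/verification.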
test_cli("keygen", ["--algo=ECDSA", "--params=secp256k1"], pem)
test_cli("keygen", ["--algo=ECDSA", "--params=secp256r1", "--output=" + priv_key], "")
test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key, priv_key), "")
test_cli("pkcs8", "--pub-out --der-out --output=%s %s" % (pub_der_key, priv_key), "")
test_cli("pkcs8", "--pass-out=foof --der-out --output=%s %s" % (enc_der, priv_key), "")
test_cli("pkcs8", "--pass-out=foof --output=%s %s" % (enc_pem, priv_key), "")
dec_pem = test_cli("pkcs8", ["--pass-in=foof", enc_pem], None)
dec_der = test_cli("pkcs8", ["--pass-in=foof", enc_der], None)
if dec_pem != dec_der:
logging.error("Problem decrypting PKCS8 key")
test_cli("fingerprint", ['--no-fsname', pub_key],
"83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4")
test_cli("fingerprint", ['--no-fsname', pub_der_key],
"83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4")
test_cli("fingerprint", ['--no-fsname', pub_key, pub_der_key],
"83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4\n"
"83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4")
test_cli("fingerprint", [pub_der_key],
pub_der_key +
": 83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4")
test_cli("fingerprint", ['-'],
"83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4",
open(pub_key, 'rb').read().decode())
valid_sig = "nI4mI1ec14Y7nYUWs2edysAVvkob0TWpmGh5rrYWDA+/W9Fj0ZM21qJw8qa3/avAOIVBO6hoMEVmfJYXlS+ReA=="
test_cli("sign", "--provider=base %s %s" % (priv_key, pub_key), valid_sig)
test_cli("verify", [pub_key, pub_key, '-'],
"Signature is valid", valid_sig)
test_cli("verify", [pub_key, pub_key, '-'],
"Signature is invalid",
valid_sig.replace("G", "H"))
test_cli("gen_self_signed",
[priv_key, "CA", "--ca", "--country=VT",
"--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
"")
test_cli("cert_verify", ca_cert, "Certificate did not validate - Cannot establish trust")
cert_info = test_cli("cert_info", ['--fingerprint', ca_cert], None)
if cert_info.find('Subject: CN="CA",C="VT"') < 0:
logging.error('Unexpected output for cert_info command %s', cert_info)
if cert_info.find('Subject keyid: 69DD911C9EEE3400C67CBC3F3056CBE711BD56AF9495013F') < 0:
logging.error('Unexpected output for cert_info command %s', cert_info)
test_cli("gen_pkcs10", "%s User --output=%s" % (priv_key, crt_req))
test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, user_cert))
test_cli("cert_verify", [user_cert, ca_cert],
"Certificate passes validation checks")
test_cli("cert_verify", user_cert,
"Certificate did not validate - Certificate issuer not found")
def cli_xmss_sign_tests(tmp_dir):
priv_key = os.path.join(tmp_dir, 'priv.pem')
pub_key = os.path.join(tmp_dir, 'pub.pem')
pub_key2 = os.path.join(tmp_dir, 'pub2.pem')
msg = os.path.join(tmp_dir, 'input')
sig1 = os.path.join(tmp_dir, 'sig1')
sig2 = os.path.join(tmp_dir, 'sig2')
test_cli("rng", ['--output=%s' % (msg)], "")
test_cli("hash", ["--no-fsname", msg], "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855")
test_cli("keygen", ["--algo=XMSS", "--output=%s" % (priv_key)], "")
test_cli("hash", ["--no-fsname", priv_key], "5B38F737BA41BE7F40433DB30EAEF7C41ABB0F7D9E7A09DEB5FDCE7B6811693F")
test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key, priv_key), "")
test_cli("fingerprint", ['--no-fsname', pub_key],
"B0:F4:98:6E:D8:4E:05:63:A1:D8:4B:37:61:5A:A0:41:78:7E:DE:0E:72:46:E0:A8:D6:CF:09:54:08:DA:A4:22")
# verify the key is updated after each signature:
test_cli("sign", [priv_key, msg, "--output=%s" % (sig1)], "")
test_cli("verify", [pub_key, msg, sig1], "Signature is valid")
test_cli("hash", ["--no-fsname", sig1], "04AF45451C7A9AF2D828E1AD6EC262E012436F4087C5DA6F32C689D781E597D0")
test_cli("hash", ["--no-fsname", priv_key], "67929FAEC636E43DE828C1CD7E2D11CE7C3388CE90DD0A0F687C6627FFA850CD")
test_cli("sign", [priv_key, msg, "--output=%s" % (sig2)], "")
test_cli("verify", [pub_key, msg, sig2], "Signature is valid")
test_cli("hash", ["--no-fsname", sig2], "0785A6AD54CC7D01F2BE2BC6463A3EAA1159792E52210ED754992C5068E8F24F")
test_cli("hash", ["--no-fsname", priv_key], "1940945D68B1CF54D79E05DD7913A4D0B4959183F1E12B81A4E43EF4E63FBD20")
# private key updates, public key is unchanged:
test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key2, priv_key), "")
test_cli("fingerprint", ['--no-fsname', pub_key2],
"B0:F4:98:6E:D8:4E:05:63:A1:D8:4B:37:61:5A:A0:41:78:7E:DE:0E:72:46:E0:A8:D6:CF:09:54:08:DA:A4:22")
def cli_pbkdf_tune_tests(_tmp_dir):
if not check_for_command("pbkdf_tune"):
return
expected = re.compile(r'For (default|[1-9][0-9]*) ms selected Scrypt\([0-9]+,[0-9]+,[0-9]+\) using [0-9]+ MiB')
output = test_cli("pbkdf_tune", ["--check", "1", "10", "50", "default"], None).split('\n')
for line in output:
if expected.match(line) is None:
logging.error("Unexpected line '%s'", line)
expected_pbkdf2 = re.compile(r'For (default|[1-9][0-9]*) ms selected PBKDF2\(HMAC\(SHA-256\),[0-9]+\)')
output = test_cli("pbkdf_tune", ["--algo=PBKDF2(SHA-256)", "--check", "1", "10", "50", "default"], None).split('\n')
for line in output:
if expected_pbkdf2.match(line) is None:
logging.error("Unexpected line '%s'", line)
expected_argon2 = re.compile(r'For (default|[1-9][0-9]*) ms selected Argon2id\([0-9]+,[0-9]+,[0-9]+\)')
output = test_cli("pbkdf_tune", ["--algo=Argon2id", "--check", "1", "10", "50", "default"], None).split('\n')
for line in output:
if expected_argon2.match(line) is None:
logging.error("Unexpected line '%s'", line)
def cli_psk_db_tests(tmp_dir):
if not check_for_command("psk_get"):
return
psk_db = os.path.join(tmp_dir, 'psk.db')
db_key1 = "909"*32
db_key2 = "451"*32
test_cli("psk_set", [psk_db, db_key1, "name", "F00FEE"], "")
test_cli("psk_set", [psk_db, db_key2, "name", "C00FEE11"], "")
test_cli("psk_set", [psk_db, db_key1, "name2", "50051029"], "")
test_cli("psk_get", [psk_db, db_key1, "name"], "F00FEE")
test_cli("psk_get", [psk_db, db_key2, "name"], "C00FEE11")
test_cli("psk_list", [psk_db, db_key1], "name\nname2")
test_cli("psk_list", [psk_db, db_key2], "name")
def cli_compress_tests(tmp_dir):
if not check_for_command("compress"):
return
input_file = os.path.join(tmp_dir, 'input.txt')
output_file = os.path.join(tmp_dir, 'input.txt.gz')
with open(input_file, 'w') as f:
f.write("hi there")
f.close()
test_cli("compress", input_file)
if not os.access(output_file, os.R_OK):
logging.error("Compression did not created expected output file")
is_py3 = sys.version_info[0] == 3
output_hdr = open(output_file, 'rb').read(2)
if is_py3:
if output_hdr[0] != 0x1F or output_hdr[1] != 0x8B:
logging.error("Did not see expected gzip header")
else:
if ord(output_hdr[0]) != 0x1F or ord(output_hdr[1]) != 0x8B:
logging.error("Did not see expected gzip header")
os.unlink(input_file)
test_cli("decompress", output_file)
if not os.access(input_file, os.R_OK):
logging.error("Decompression did not created expected output file")
recovered = open(input_file).read()
if recovered != "hi there":
logging.error("Decompression did not recover original input")
def cli_rng_tests(_tmp_dir):
test_cli("rng", "10", "D80F88F6ADBE65ACB10C")
test_cli("rng", "16", "D80F88F6ADBE65ACB10C3602E67D985B")
test_cli("rng", "10 6", "D80F88F6ADBE65ACB10C\n1B119CC068AF")
test_cli("rng", ['--format=base64', '10'], "2A+I9q2+ZayxDA==")
test_cli("rng", ['--format=base58', '10'], "D93XRyVfxqs7oR")
test_cli("rng", ['--format=base58check', '10'], "2NS1jYUq92TyGFVnhVLa")
hex_10 = re.compile('[A-F0-9]{20}')
for rng in ['system', 'auto', 'entropy']:
output = test_cli("rng", ["10", '--%s' % (rng)], use_drbg=False)
if output == "D80F88F6ADBE65ACB10C":
logging.error('RNG produced DRBG output')
if hex_10.match(output) is None:
logging.error('Unexpected RNG output %s', output)
has_rdrand = test_cli("cpuid", []).find(' rdrand ') > 0
if has_rdrand:
output = test_cli("rng", ["10", '--rdrand'], use_drbg=False)
if output == "D80F88F6ADBE65ACB10C":
logging.error('RDRAND produced DRBG output')
if hex_10.match(output) is None:
logging.error('Unexpected RNG output %s', output)
def cli_roughtime_check_tests(tmp_dir):
# pylint: disable=line-too-long
if not check_for_command("roughtime_check"):
return
chain = os.path.join(tmp_dir, 'roughtime-chain')
with open(chain, 'w') as f:
f.write("""\
ed25519 bbT+RPS7zKX6w71ssPibzmwWqU9ffRV5oj2OresSmhE= eu9yhsJfVfguVSqGZdE8WKIxaBBM0ZG3Vmuc+IyZmG2YVmrIktUByDdwIFw6F4rZqmSFsBO85ljoVPz5bVPCOw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWBnGOEajOwPA6G7oL47seBP4C7eEpr57H43C2/fK/kMA0UGZVUdf4KNX8oxOK6JIcsbVk8qhghTwA70qtwpYmQkDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AJrA8tEqPBQAqisiuAxgy2Pj7UJAiWbCdzGz1xcCnja3T+AqhC8fwpeIwW4GPy/vEb/awXW2DgSLKJfzWIAz+2lsR7t4UjNPvAgAAAEAAAABTSUcAREVMRes9Ch4X0HIw5KdOTB8xK4VDFSJBD/G9t7Et/CU7UW61OiTBXYYQTG2JekWZmGa0OHX1JPGG+APkpbsNw0BKUgYDAAAAIAAAACgAAABQVUJLTUlOVE1BWFR/9BWjpsWTQ1f6iUJea3EfZ1MkX3ftJiV3ABqNLpncFwAAAAAAAAAA//////////8AAAAA
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= uLeTON9D+2HqJMzK6sYWLNDEdtBl9t/9yw1cVAOm0/sONH5Oqdq9dVPkC9syjuWbglCiCPVF+FbOtcxCkrgMmA== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWOw1jl0uSiBEH9HE8/6r7zxoSc01f48vw+UzH8+VJoPelnvVJBj4lnH8uRLh5Aw0i4Du7XM1dp2u0r/I5PzhMQoDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AUBo+tEqPBQC47l77to7ESFTVhlw1SC74P5ssx6gpuJ6eP+1916GuUiySGE/x3Fp0c3otUGAdsRQou5p9PDTeane/YEeVq4/8AgAAAEAAAABTSUcAREVMRe5T1ml8wHyWAcEtHP/U5Rg/jFXTEXOSglngSa4aI/CECVdy4ZNWeP6vv+2//ZW7lQsrWo7ZkXpvm9BdBONRSQIDAAAAIAAAACgAAABQVUJLTUlOVE1BWFQpXlenV0OfVisvp9jDHXLw8vymZVK9Pgw9k6Edf8ZEhUgSGEc5jwUASHLvZE2PBQAAAAAA
ed25519 etPaaIxcBMY1oUeGpwvPMCJMwlRVNxv51KK/tktoJTQ= U53wX99JzZwy4BXa9C6R04bPu4yqFB5w5/wTgG8Mw5wm+VLrY70ECxJ9ZHnpdHVHaLEU3aeLnQFZyZPRAEOCyw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWMh3mPWCCbOlX8xDWbU9qdfKoReJX/XLsivom8bJJYmcC7T03tyXrtWUheEJweHtg4qMgSyifQS1MjHJSy1jPAsDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8Akxw/tEqPBQBfOsOuciR7jiAW5itQ39y8yVr/ZJmgMwvTjqaU4/wA05ZqG4RqoLdvDXh5bCNySL6LrrnBNSAHwn5COt0CItNuAgAAAEAAAABTSUcAREVMRVP3BIOzsZmuxqMi+ScIBPyKtzFfK7ZlPFNP0JrNwln2QYtAcQFIKywDdNAAL+n8i3dz1p99K50FJjCkCl2J6AMDAAAAIAAAACgAAABQVUJLTUlOVE1BWFQKC/kZVdjiNT2NCSGfnpot4eqipyMFsyMjiIQmqqqXqQCAa245jwUAAGCgA56PBQAAAAAA
ed25519 AW5uAoTSTDfG5NfY1bTh08GUnOqlRb+HVhbJ3ODJvsE= IcZcXFuaLKYYhWcK3sT/6PrVeXMmabCRbf9hvVfkMkqEW1PFL++ZnHJ1/m+G8azITxvktwsfP1YAOOxWdbf9XQ== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWL5DAl8GPNUQ/mSXl0tI4N9yZAO+PiXTodJOTDL+WU/x26iqgyyQRikSSocRMzAEVLDGasdyW19mVC6H/6vfXggDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8Av/JAtEqPBQBIP346SHhCdDfughzeH+uYSbxngDYxqHzBDtZt0obUKrzxfRWzD1oR61B1reLvoPVCKSfzEngi/g1NSQjTrzNMAgAAAEAAAABTSUcAREVMRTQLLplQv0rN4p77Bo59qT8bbquV6MKSwILI/Tw2LLGo9noaZegUFmM+rNu1d1AVOEVQ01j6/2xDmBvp0d6MZgEDAAAAIAAAACgAAABQVUJLTUlOVE1BWFS4a1dYoIB5u/zkbR3sIteuhVrQkszzj+Gng9ywo6O9VgAAAAAAAAAA//////////8AAAAA
ed25519 cj8GsiNlRkqiDElAeNMSBBMwrAl15hYPgX50+GWX/lA= Tsy82BBU2xxVqNe1ip11OyEGoKWhKoSggWjBmDTSBmKbTs7bPPCEidYc5TQ23sQUWe62G35fQOVU28q+Eq5uhQ== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDAmi7zgXAqLgQXVfbjeqnUZRiXCZI64QIoAKFL83CQHbyXgB4cNwHfQ9mSg0hYxTp1M8QxOuzusnUpk05DIRwwDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AcOBCtEqPBQBhsr1mKOxxCf4VDFzAtYB4Nhs332AN1LrJU/8+VqktzfPd2R7awJHEVEWugvSvOrr+9d332mQObAkYfKfDtbSFAgAAAEAAAABTSUcAREVMRUjnhDvkIjFzTEYtgHOfMpRHtnNZj4P31RFtapkwzGjOtc93pYDd7zqQCw2AVcfbSnPqa8k26z96Q9fVRzq0pw8DAAAAIAAAACgAAABQVUJLTUlOVE1BWFR7qp2oerjpbN8Y23nUGARIlsgkodW4owH29ZKhxDMn8AAAAAAAAAAA//////////8AAAAA
""")
test_cli("roughtime_check", chain, """\
1: UTC 2019-08-04T13:38:17 (+-1000000us)
2: UTC 2019-08-04T13:38:17 (+-1000000us)
3: UTC 2019-08-04T13:38:17 (+-1000000us)
4: UTC 2019-08-04T13:38:18 (+-1000000us)
5: UTC 2019-08-04T13:38:18 (+-1000000us)""")
with open(chain, 'w') as f:
f.write("ed25519 bbT+RPS7zKX6w71ssPibzmwWqU9ffRV5oj2OresSmhE= eu9yhsJfVfguVSqGZdE8WKIxaBBM0ZG3Vmuc+IyZmG2YVmrIktUByDdwIFw6F4rZqmSFsBO85ljoVPz5bVPCOw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWBnGOEajOwPA6G7oL47seBP4C7eEpr57H43C2/fK/kMA0UGZVUdf4KNX8oxOK6JIcsbVk8qhghTwA70qtwpYmQkDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AJrA8tEqPBQAqisiuAxgy2Pj7UJAiWbCdzGz1xcCnja3T+AqhC8fwpeIwW4GPy/vEb/awXW2DgSLKJfzWIAz+2lsR7t4UjNPvAgAAAEAAAABTSUcAREVMRes9Ch4X0HIw5KdOTB8xK4VDFSJBD/G9t7Et/CU7UW61OiTBXYYQTG2JekWZmGa0OHX1JPGG+APkpbsNw0BKUgYDAAAAIAAAACgAAABQVUJLTUlOVE1BWFR/9BWjpsWTQ1f6iUJea3EfZ1MkX3ftJiV3ABqNLpncFwAAAAAAAAAA//////////8AAAAA")
test_cli("roughtime_check", [chain, "--raw-time"], "1: UTC 1564925897781286 (+-1000000us)")
with open(chain, 'w') as f:
f.write("ed25519 cbT+RPS7zKX6w71ssPibzmwWqU9ffRV5oj2OresSmhE= eu9yhsJfVfguVSqGZdE8WKIxaBBM0ZG3Vmuc+IyZmG2YVmrIktUByDdwIFw6F4rZqmSFsBO85ljoVPz5bVPCOw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWBnGOEajOwPA6G7oL47seBP4C7eEpr57H43C2/fK/kMA0UGZVUdf4KNX8oxOK6JIcsbVk8qhghTwA70qtwpYmQkDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AJrA8tEqPBQAqisiuAxgy2Pj7UJAiWbCdzGz1xcCnja3T+AqhC8fwpeIwW4GPy/vEb/awXW2DgSLKJfzWIAz+2lsR7t4UjNPvAgAAAEAAAABTSUcAREVMRes9Ch4X0HIw5KdOTB8xK4VDFSJBD/G9t7Et/CU7UW61OiTBXYYQTG2JekWZmGa0OHX1JPGG+APkpbsNw0BKUgYDAAAAIAAAACgAAABQVUJLTUlOVE1BWFR/9BWjpsWTQ1f6iUJea3EfZ1MkX3ftJiV3ABqNLpncFwAAAAAAAAAA//////////8AAAAA")
test_cli("roughtime_check", chain, expected_stderr='Error: Roughtime Invalid signature or public key')
def cli_roughtime_tests(tmp_dir):
# pylint: disable=line-too-long
# pylint: disable=too-many-locals
import socket
import base64
import threading
if not check_for_command("roughtime"):
return
server_port = random_port_number()
chain_file = os.path.join(tmp_dir, 'roughtime-chain')
ecosystem = os.path.join(tmp_dir, 'ecosystem')
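    # Minimal stand-in Roughtime server: replies to each canned request with the
    # matching canned response so the client can be exercised without network access.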
def run_udp_server():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ('127.0.0.1', server_port)
sock.bind(server_address)
while True:
data, address = sock.recvfrom(4096)
if data:
if data != base64.b64decode(server_request):
logging.error("unexpected request")
sock.sendto(base64.b64decode(server_response), address)
udp_thread = threading.Thread(target=run_udp_server)
udp_thread.daemon = True
udp_thread.start()
chain = [
"""\
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDwlo/AkUnTrecAW4Ci5Tkh3KOqs6R7KLTsFtq16RXN5F7G5ckGv11UtzHoZTbKbEk03a6ogAOK54Q2CI/7XGA8DAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AWDLihlaSBQAoq/5gEjRCrhfH16X2GYjQJSG/CgSuGhYeCsrw7XkphLI3cxw2unJRDW8DAJrYqEGaW0NPKZk7bbpPjU/Q6Es1AgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA
""",
"""\
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDwlo/AkUnTrecAW4Ci5Tkh3KOqs6R7KLTsFtq16RXN5F7G5ckGv11UtzHoZTbKbEk03a6ogAOK54Q2CI/7XGA8DAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AWDLihlaSBQAoq/5gEjRCrhfH16X2GYjQJSG/CgSuGhYeCsrw7XkphLI3cxw2unJRDW8DAJrYqEGaW0NPKZk7bbpPjU/Q6Es1AgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWHH5Ofs4HciIFXjE9egjDbistJptoMXIC7ugCgHhI4NPJqfYY256NpULXKc9c30ul7oHXQyKLfGd84mIAxC3UwQDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AuOoUh1aSBQANeC4gGGG3a23PpmF+y6CrUS9VWjyj0Ydpl2tMVDLaK2vd5QtYKKJ3UOyprGKk0D/aPn4E3Bk2rE3BKBZRXM1AAgAAAEAAAABTSUcAREVMRci9uvioJssgd8txxFlqz9RqPx+YLVMkHmm24fMUtYGWF/nhkoEYVGT7O+tXSfHHY/KHcUZjVaZpEt/tmXlXBAUDAAAAIAAAACgAAABQVUJLTUlOVE1BWFSxhKhavdriTvCAtNVcK5yr0cAbsWp2MsrwUV5YTc+7V0CsaLZSkgUAQAxA1GaSBQAAAAAA
""",
"""\
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= SbWKPilWYrt+1vgFU3jlxGNOH6I/1npX8wl+KoraN3S6VDsyM6EfCV+JPEK8BsNoM2VIpMcSdjcVna/GwXwZkg== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWHH5Ofs4HciIFXjE9egjDbistJptoMXIC7ugCgHhI4NPJqfYY256NpULXKc9c30ul7oHXQyKLfGd84mIAxC3UwQDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AuOoUh1aSBQANeC4gGGG3a23PpmF+y6CrUS9VWjyj0Ydpl2tMVDLaK2vd5QtYKKJ3UOyprGKk0D/aPn4E3Bk2rE3BKBZRXM1AAgAAAEAAAABTSUcAREVMRci9uvioJssgd8txxFlqz9RqPx+YLVMkHmm24fMUtYGWF/nhkoEYVGT7O+tXSfHHY/KHcUZjVaZpEt/tmXlXBAUDAAAAIAAAACgAAABQVUJLTUlOVE1BWFSxhKhavdriTvCAtNVcK5yr0cAbsWp2MsrwUV5YTc+7V0CsaLZSkgUAQAxA1GaSBQAAAAAA
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWN5Y0b2irPS1JgqJFQMciPg4aWd9qj1ZqcJc5bGXe1m4ZdAXa5OIhXa0+680MgpyhEHhqYJDIwH1XRa1OZx5YAUDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AgBW3iFaSBQD9WI+Qr6NOZsDmP0PsnCo66mstM3ac5ZON+I+ZeEK8lZWBASvsD2JIfq3v4d1QH5g4STs3wOazQPc25Puy659ZAgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA
""",
]
request = [
"AgAAAEAAAABOT05DUEFE/9gPiPatvmWssQw2AuZ9mFvAZ/8wZuA95sEuajMAA0Q3GxaQXH98jzsnmYqqSwU5MaPBk/MzEBXgKb+EnH34wvAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"AgAAAEAAAABOT05DUEFE/0m1ij4pVmK7ftb4BVN45cRjTh+iP9Z6V/MJfiqK2jd0ulQ7MjOhHwlfiTxCvAbDaDNlSKTHEnY3FZ2vxsF8GZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"AgAAAEAAAABOT05DUEFE/0AcDP0F/L7NTiOCQlHovyMlovVtG4lBRqAgydNYk9WOoanOwclZuV8z2b/SCHj5thxbSNxuLNZoDQ2b6TWgPfsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
]
response = [
"BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDwlo/AkUnTrecAW4Ci5Tkh3KOqs6R7KLTsFtq16RXN5F7G5ckGv11UtzHoZTbKbEk03a6ogAOK54Q2CI/7XGA8DAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AWDLihlaSBQAoq/5gEjRCrhfH16X2GYjQJSG/CgSuGhYeCsrw7XkphLI3cxw2unJRDW8DAJrYqEGaW0NPKZk7bbpPjU/Q6Es1AgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA",
"BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWHH5Ofs4HciIFXjE9egjDbistJptoMXIC7ugCgHhI4NPJqfYY256NpULXKc9c30ul7oHXQyKLfGd84mIAxC3UwQDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AuOoUh1aSBQANeC4gGGG3a23PpmF+y6CrUS9VWjyj0Ydpl2tMVDLaK2vd5QtYKKJ3UOyprGKk0D/aPn4E3Bk2rE3BKBZRXM1AAgAAAEAAAABTSUcAREVMRci9uvioJssgd8txxFlqz9RqPx+YLVMkHmm24fMUtYGWF/nhkoEYVGT7O+tXSfHHY/KHcUZjVaZpEt/tmXlXBAUDAAAAIAAAACgAAABQVUJLTUlOVE1BWFSxhKhavdriTvCAtNVcK5yr0cAbsWp2MsrwUV5YTc+7V0CsaLZSkgUAQAxA1GaSBQAAAAAA",
"BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWN5Y0b2irPS1JgqJFQMciPg4aWd9qj1ZqcJc5bGXe1m4ZdAXa5OIhXa0+680MgpyhEHhqYJDIwH1XRa1OZx5YAUDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AgBW3iFaSBQD9WI+Qr6NOZsDmP0PsnCo66mstM3ac5ZON+I+ZeEK8lZWBASvsD2JIfq3v4d1QH5g4STs3wOazQPc25Puy659ZAgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA",
]
server_request = request[0]
server_response = response[0]
test_cli("roughtime", [], expected_stderr='Please specify either --servers-file or --host and --pubkey')
with open(ecosystem, 'w') as f:
f.write("Cloudflare-Roughtime ed25519 gD63hSj4ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= udp 127.0.0.1:" + str(server_port))
test_cli("roughtime", [
"--check-local-clock=0",
"--chain-file=",
"--servers-file=" + ecosystem]
, expected_stderr='ERROR: Public key does not match!')
with open(ecosystem, 'w') as f:
f.write("Cloudflare-Roughtime ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= udp 127.0.0.1:" + str(server_port))
test_cli("roughtime", [
"--chain-file=",
"--servers-file=" + ecosystem]
, expected_stderr='ERROR: Local clock mismatch')
test_cli("roughtime", [
"--check-local-clock=0",
"--chain-file=" + chain_file,
"--servers-file=" + ecosystem]
, "Cloudflare-Roughtime : UTC 2019-09-12T08:00:11 (+-1000000us)")
with open(chain_file, 'r') as f:
read_data = f.read()
if read_data != chain[0]:
logging.error("unexpected chain")
server_request = request[1]
server_response = response[1]
test_cli("roughtime", [
"--check-local-clock=0",
"--chain-file=" + chain_file,
"--host=127.0.0.1:" + str(server_port),
"--pubkey=gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo=",
"--raw-time"]
, "UTC 1568275214691000 (+-1000000us)")
with open(chain_file, 'r') as f:
read_data = f.read()
if read_data != chain[1]:
logging.error("unexpected chain")
server_request = request[2]
server_response = response[2]
test_cli("roughtime", [
"--check-local-clock=0",
"--chain-file=" + chain_file,
"--host=127.0.0.1:" + str(server_port),
"--pubkey=gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo=",
"--max-chain-size=2"]
, "UTC 2019-09-12T08:00:42 (+-1000000us)")
with open(chain_file, 'r') as f:
read_data = f.read()
if read_data != chain[2]:
logging.error("unexpected chain")
def cli_zfec_tests(tmp_dir):
input_file = os.path.join(tmp_dir, 'input.bin')
exp_hash = "B49BCD978052C2C05A2D9ACE9863D150E3FA5765FCDF91AC47B5EAD54BFEE24E"
test_cli("rng", ["4096", "--output=%s" % (input_file)], "")
test_cli("hash", ["--no-fsname", input_file], exp_hash)
prefix = "test"
k = 3
n = 5
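    # Split the input into n = 5 shares with k = 3 needed for recovery, then check the
    # share metadata and that decoding succeeds with enough shares and fails otherwise.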
test_cli("fec_encode", ["--output-dir=%s" % (tmp_dir),
"--prefix=%s" % (prefix),
str(k), str(n), input_file])
info_re = re.compile('FEC share [0-9]/%d with %d needed for recovery' % (n, k))
share_files = []
for share in range(1, n + 1):
expected_share = os.path.join(tmp_dir, '%s.%d_%d.fec' % (prefix, share, n))
share_files.append(expected_share)
info_out = test_cli("fec_info", expected_share)
if info_re.match(info_out) is None:
logging.error("Unexpected output for fec_info")
k_shares = n - k
# Insufficient shares:
test_cli("fec_decode", share_files[(k_shares + 1):], None, None,
"At least %d shares are required for recovery" % (k))
output_file = os.path.join(tmp_dir, 'output.bin')
test_cli("fec_decode", share_files[k_shares:] + ["--output=%s" % (output_file)])
test_cli("hash", ["--no-fsname", output_file], exp_hash)
def cli_pk_workfactor_tests(_tmp_dir):
test_cli("pk_workfactor", "1024", "80")
test_cli("pk_workfactor", "2048", "111")
test_cli("pk_workfactor", ["--type=rsa", "512"], "58")
test_cli("pk_workfactor", ["--type=dl", "512"], "58")
test_cli("pk_workfactor", ["--type=dl_exp", "512"], "192")
def cli_dl_group_info_tests(_tmp_dir):
dl_output = re.compile('(P|G) = [A-F0-9]+')
for bits in [1024, 1536, 2048, 3072, 4096, 6144, 8192]:
output = test_cli("dl_group_info", "modp/ietf/%d" % (bits))
lines = output.split('\n')
if len(lines) != 2:
logging.error('Unexpected output from dl_group_info')
for l in lines:
if not dl_output.match(l):
logging.error('Unexpected output from dl_group_info')
def cli_ec_group_info_tests(_tmp_dir):
# pylint: disable=line-too-long
secp256r1_info = """P = 0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF
A = 0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC
B = 0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B
N = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551
G = 0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296,0x4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5"""
secp256r1_pem = """-----BEGIN EC PARAMETERS-----
MIHgAgEBMCwGByqGSM49AQECIQD/////AAAAAQAAAAAAAAAAAAAAAP//////////
/////zBEBCD/////AAAAAQAAAAAAAAAAAAAAAP///////////////AQgWsY12Ko6
k+ez671VdpiGvGUdBrDMU7D2O848PifSYEsEQQRrF9Hy4SxCR/i85uVjpEDydwN9
gS3rM6D0oTlF2JjClk/jQuL+Gn+bjufrSnwPnhYrzjNXazFezsu2QGg3v1H1AiEA
/////wAAAAD//////////7zm+q2nF56E87nKwvxjJVECAQE=
-----END EC PARAMETERS-----"""
test_cli("ec_group_info", "secp256r1", secp256r1_info)
test_cli("ec_group_info", "--pem secp256r1", secp256r1_pem)
def cli_cpuid_tests(_tmp_dir):
cpuid_output = test_cli("cpuid", [])
if not cpuid_output.startswith('CPUID flags:'):
logging.error('Unexpected cpuid output "%s"', cpuid_output)
flag_re = re.compile('[a-z0-9_]+')
flags = cpuid_output[13:].split(' ')
for flag in flags:
if flag != '' and flag_re.match(flag) is None:
logging.error('Unexpected CPUID flag name "%s"', flag)
def cli_cc_enc_tests(_tmp_dir):
test_cli("cc_encrypt", ["8028028028028029", "pass"], "4308989841607208")
test_cli("cc_decrypt", ["4308989841607208", "pass"], "8028028028028027")
def cli_cert_issuance_tests(tmp_dir):
root_key = os.path.join(tmp_dir, 'root.key')
root_crt = os.path.join(tmp_dir, 'root.crt')
int_key = os.path.join(tmp_dir, 'int.key')
int_crt = os.path.join(tmp_dir, 'int.crt')
int_csr = os.path.join(tmp_dir, 'int.csr')
leaf_key = os.path.join(tmp_dir, 'leaf.key')
leaf_crt = os.path.join(tmp_dir, 'leaf.crt')
leaf_csr = os.path.join(tmp_dir, 'leaf.csr')
test_cli("keygen", ["--params=2048", "--output=" + root_key], "")
test_cli("keygen", ["--params=2048", "--output=" + int_key], "")
test_cli("keygen", ["--params=2048", "--output=" + leaf_key], "")
test_cli("gen_self_signed",
[root_key, "Root", "--ca", "--path-limit=2", "--output="+root_crt], "")
test_cli("gen_pkcs10", "%s Intermediate --ca --output=%s" % (int_key, int_csr))
test_cli("sign_cert", "%s %s %s --output=%s" % (root_crt, root_key, int_csr, int_crt))
test_cli("gen_pkcs10", "%s Leaf --output=%s" % (leaf_key, leaf_csr))
test_cli("sign_cert", "%s %s %s --output=%s" % (int_crt, int_key, leaf_csr, leaf_crt))
test_cli("cert_verify" "%s %s %s" % (leaf_crt, int_crt, root_crt), "Certificate passes validation checks")
def cli_timing_test_tests(_tmp_dir):
timing_tests = ["bleichenbacher", "manger",
"ecdsa", "ecc_mul", "inverse_mod", "pow_mod",
"lucky13sec3", "lucky13sec4sha1",
"lucky13sec4sha256", "lucky13sec4sha384"]
output_re = re.compile('[0-9]+;[0-9];[0-9]+')
for suite in timing_tests:
output = test_cli("timing_test", [suite, "--measurement-runs=16", "--warmup-runs=3"], None).split('\n')
for line in output:
if output_re.match(line) is None:
logging.error("Unexpected output in timing_test %s: %s", suite, line)
def cli_tls_ciphersuite_tests(_tmp_dir):
policies = ['default', 'suiteb_128', 'suiteb_192', 'strict', 'all']
versions = ['tls1.2']
ciphersuite_re = re.compile('^[A-Z0-9_]+$')
for policy in policies:
for version in versions:
if version != 'tls1.2' and policy != 'all':
continue
output = test_cli("tls_ciphers", ["--version=" + version, "--policy=" + policy], None).split('\n')
for line in output:
if ciphersuite_re.match(line) is None:
logging.error("Unexpected ciphersuite line %s", line)
def cli_asn1_tests(_tmp_dir):
input_pem = """-----BEGIN BLOB-----
MCACAQUTBnN0cmluZzEGAQH/AgFjBAUAAAAAAAMEAP///w==
-----END BLOB-----
"""
expected = """d= 0, l= 32: SEQUENCE
d= 1, l= 1: INTEGER 5
d= 1, l= 6: PRINTABLE STRING string
d= 1, l= 6: SET
d= 2, l= 1: BOOLEAN true
d= 2, l= 1: INTEGER 99
d= 1, l= 5: OCTET STRING 0000000000
d= 1, l= 4: BIT STRING FFFFFF"""
test_cli("asn1print", "--pem -", expected, input_pem)
def cli_tls_socket_tests(tmp_dir):
if not run_socket_tests() or not check_for_command("tls_client") or not check_for_command("tls_server"):
return
client_msg = b'Client message %d\n' % (random.randint(0, 2**128))
server_port = random_port_number()
priv_key = os.path.join(tmp_dir, 'priv.pem')
ca_cert = os.path.join(tmp_dir, 'ca.crt')
crt_req = os.path.join(tmp_dir, 'crt.req')
server_cert = os.path.join(tmp_dir, 'server.crt')
test_cli("keygen", ["--algo=ECDSA", "--params=secp256r1", "--output=" + priv_key], "")
test_cli("gen_self_signed",
[priv_key, "CA", "--ca", "--country=VT",
"--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
"")
test_cli("cert_verify", ca_cert, "Certificate did not validate - Cannot establish trust")
test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req))
test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert))
tls_server = subprocess.Popen([CLI_PATH, 'tls_server', '--max-clients=1',
'--port=%d' % (server_port), server_cert, priv_key],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
wait_time = 1.0
time.sleep(wait_time)
tls_client = subprocess.Popen([CLI_PATH, 'tls_client', 'localhost',
'--port=%d' % (server_port), '--trusted-cas=%s' % (ca_cert)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(wait_time)
tls_client.stdin.write(client_msg)
tls_client.stdin.flush()
time.sleep(wait_time)
(stdout, stderr) = tls_client.communicate()
if stderr:
logging.error("Got unexpected stderr output %s", stderr)
if b'Handshake complete' not in stdout:
logging.error('Failed to complete handshake: %s', stdout)
if client_msg not in stdout:
logging.error("Missing client message from stdout %s", stdout)
tls_server.communicate()
def cli_tls_http_server_tests(tmp_dir):
if not run_socket_tests() or not check_for_command("tls_http_server"):
return
try:
from http.client import HTTPSConnection
except ImportError:
try:
from httplib import HTTPSConnection
except ImportError:
return
import ssl
server_port = random_port_number()
priv_key = os.path.join(tmp_dir, 'priv.pem')
ca_cert = os.path.join(tmp_dir, 'ca.crt')
crt_req = os.path.join(tmp_dir, 'crt.req')
server_cert = os.path.join(tmp_dir, 'server.crt')
test_cli("keygen", ["--algo=ECDSA", "--params=secp384r1", "--output=" + priv_key], "")
test_cli("gen_self_signed",
[priv_key, "CA", "--ca", "--country=VT",
"--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
"")
test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req))
test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert))
tls_server = subprocess.Popen([CLI_PATH, 'tls_http_server', '--max-clients=2',
'--port=%d' % (server_port), server_cert, priv_key],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
wait_time = 1.0
time.sleep(wait_time)
context = ssl.create_default_context(cafile=ca_cert)
conn = HTTPSConnection('localhost', port=server_port, context=context)
conn.request("GET", "/")
resp = conn.getresponse()
if resp.status != 200:
logging.error('Unexpected response status %d', resp.status)
body = str(resp.read())
if body.find('TLS negotiation with Botan 3.') < 0:
logging.error('Unexpected response body')
conn.request("POST", "/logout")
resp = conn.getresponse()
if resp.status != 405:
logging.error('Unexpected response status %d', resp.status)
if sys.version_info.major >= 3:
rc = tls_server.wait(5) # pylint: disable=too-many-function-args
else:
rc = tls_server.wait()
if rc != 0:
logging.error("Unexpected return code from https_server %d", rc)
def cli_tls_proxy_tests(tmp_dir):
# pylint: disable=too-many-locals,too-many-statements
if not run_socket_tests() or not check_for_command("tls_proxy"):
return
try:
from http.client import HTTPSConnection
except ImportError:
try:
from httplib import HTTPSConnection
except ImportError:
return
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
try:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
return
import ssl
import threading
server_port = random_port_number()
proxy_port = random_port_number()
while server_port == proxy_port:
proxy_port = random_port_number()
priv_key = os.path.join(tmp_dir, 'priv.pem')
ca_cert = os.path.join(tmp_dir, 'ca.crt')
crt_req = os.path.join(tmp_dir, 'crt.req')
server_cert = os.path.join(tmp_dir, 'server.crt')
test_cli("keygen", ["--algo=ECDSA", "--params=secp384r1", "--output=" + priv_key], "")
test_cli("gen_self_signed",
[priv_key, "CA", "--ca", "--country=VT",
"--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
"")
test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req))
test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert))
tls_proxy = subprocess.Popen([CLI_PATH, 'tls_proxy', str(proxy_port), '127.0.0.1', str(server_port),
server_cert, priv_key, '--output=/tmp/proxy.err', '--max-clients=2'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
wait_time = 1.0
time.sleep(wait_time)
server_response = binascii.hexlify(os.urandom(32))
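    # Plain HTTP backend sitting behind tls_proxy; every GET returns the random
    # server_response payload so proxied replies can be compared byte for byte.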
def run_http_server():
class Handler(BaseHTTPRequestHandler):
def do_GET(self): # pylint: disable=invalid-name
self.send_response(200)
self.end_headers()
self.wfile.write(server_response)
httpd = HTTPServer(('', server_port), Handler)
httpd.serve_forever()
http_thread = threading.Thread(target=run_http_server)
http_thread.daemon = True
http_thread.start()
time.sleep(wait_time)
context = ssl.create_default_context(cafile=ca_cert)
for _i in range(2):
conn = HTTPSConnection('localhost', port=proxy_port, context=context)
conn.request("GET", "/")
resp = conn.getresponse()
if resp.status != 200:
logging.error('Unexpected response status %d', resp.status)
body = resp.read()
if body != server_response:
logging.error('Unexpected response from server %s', body)
if sys.version_info.major >= 3:
rc = tls_proxy.wait(5) # pylint: disable=too-many-function-args
else:
rc = tls_proxy.wait()
if rc != 0:
logging.error('Unexpected return code %d', rc)
def cli_trust_root_tests(tmp_dir):
pem_file = os.path.join(tmp_dir, 'pems')
dn_file = os.path.join(tmp_dir, 'dns')
test_cli("trust_roots", ['--dn-only', '--output=%s' % (dn_file)], "")
dn_re = re.compile('(.+=\".+\")(,.+=\".+\")')
encoding_kwords = {}
if sys.version_info[0] == 3:
encoding_kwords['encoding'] = 'utf8'
for line in open(dn_file, **encoding_kwords):
if dn_re.match(line) is None:
logging.error("Unexpected DN line %s", line)
test_cli("trust_roots", ['--output=%s' % (pem_file)], "")
def cli_tss_tests(tmp_dir):
data_file = os.path.join(tmp_dir, 'data')
exp_hash = "53B3C59276AE30EA7FD882268E80FD96AD80CC9FEB15F9FB940E7C4B5CF80B9E"
test_cli("rng", ["32", "--output=%s" % (data_file)], "")
test_cli("hash", ["--no-fsname", data_file], exp_hash)
m = 3
n = 5
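    # Threshold secret sharing: split into n = 5 shares with threshold m = 3, then
    # confirm recovery works with 5, 4 and 3 shares and fails with only 2.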
test_cli("tss_split", [str(m), str(n), data_file, "--share-prefix=%s/split" % (tmp_dir)], "")
share_files = []
for i in range(1, n+1):
share = os.path.join(tmp_dir, "split%d.tss" % (i))
if not os.access(share, os.R_OK):
logging.error("Failed to create expected split file %s", share)
share_files.append(share)
rec5 = os.path.join(tmp_dir, "recovered_5")
test_cli("tss_recover", share_files + ["--output=%s" % (rec5)], "")
test_cli("hash", ["--no-fsname", rec5], exp_hash)
rec4 = os.path.join(tmp_dir, "recovered_4")
test_cli("tss_recover", share_files[1:] + ["--output=%s" % (rec4)], "")
test_cli("hash", ["--no-fsname", rec4], exp_hash)
rec3 = os.path.join(tmp_dir, "recovered_3")
test_cli("tss_recover", share_files[2:] + ["--output=%s" % (rec3)], "")
test_cli("hash", ["--no-fsname", rec3], exp_hash)
rec2 = os.path.join(tmp_dir, "recovered_2")
test_cli("tss_recover", share_files[3:] + ["--output=%s" % (rec2)], "", None,
"Error: Insufficient shares to do TSS reconstruction")
def cli_pk_encrypt_tests(tmp_dir):
input_file = os.path.join(tmp_dir, 'input')
ctext_file = os.path.join(tmp_dir, 'ctext')
recovered_file = os.path.join(tmp_dir, 'recovered')
rsa_priv_key = os.path.join(tmp_dir, 'rsa.priv')
rsa_pub_key = os.path.join(tmp_dir, 'rsa.pub')
test_cli("keygen", ["--algo=RSA", "--provider=base", "--params=2048", "--output=%s" % (rsa_priv_key)], "")
key_hash = "D1621B7D1272545F8CCC220BC7F6F5BAF0150303B19299F0C5B79C095B3CDFC0"
test_cli("hash", ["--no-fsname", "--algo=SHA-256", rsa_priv_key], key_hash)
test_cli("pkcs8", ["--pub-out", "%s/rsa.priv" % (tmp_dir), "--output=%s" % (rsa_pub_key)], "")
# Generate a random input file
test_cli("rng", ["10", "16", "32", "--output=%s" % (input_file)], "")
    # Because we used a fixed DRBG for each invocation, the same ctext is generated each time
rng_output_hash = "32F5E7B61357DE8397EFDA1E598379DFD5EE21767BDF4E2A435F05117B836AC6"
ctext_hash = "FD39EDCAEA56B0FD39AC5CF700EDA79CD80A938C964E78E56BAA6AF742D476A2"
test_cli("hash", ["--no-fsname", "--algo=SHA-256", input_file], rng_output_hash)
# Encrypt and verify ciphertext is the expected value
test_cli("pk_encrypt", [rsa_pub_key, input_file, "--output=%s" % (ctext_file)], "")
test_cli("hash", ["--no-fsname", "--algo=SHA-256", ctext_file], ctext_hash)
# Decrypt and verify plaintext is recovered
test_cli("pk_decrypt", [rsa_priv_key, ctext_file, "--output=%s" % (recovered_file)], "")
test_cli("hash", ["--no-fsname", "--algo=SHA-256", recovered_file], rng_output_hash)
def cli_uuid_tests(_tmp_dir):
test_cli("uuid", [], "D80F88F6-ADBE-45AC-B10C-3602E67D985B")
uuid_re = re.compile(r'[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}')
output = test_cli("uuid", [])
if uuid_re.match(output) is None:
logging.error('Bad uuid output %s', output)
def cli_tls_client_hello_tests(_tmp_dir):
# pylint: disable=line-too-long
chello = "16030100cf010000cb03035b3cf2457b864d7bef2a4b1f84fc3ced2b68d9551f3455ffdd305af277a91bb200003a16b816b716ba16b9cca9cca8c02cc030c02bc02fc0adc0acc024c00ac028c014c023c009c027c013ccaa009f009ec09fc09e006b003900670033010000680000000e000c000009676d61696c2e636f6d000500050100000000000a001a0018001d0017001a0018001b0019001c01000101010201030104000b00020100000d00140012080508040806050106010401050306030403001600000017000000230000ff01000100"
output = test_cli("tls_client_hello", ["--hex", "-"], None, chello)
output_hash = "8EBFC3205ACFA98461128FE5D081D19254237AF84F7DAF000A3C992C3CF6DE44"
test_cli("hash", ["--no-fsname", "--algo=SHA-256", "-"], output_hash, output)
def cli_speed_pk_tests(_tmp_dir):
msec = 1
pk_algos = ["ECDSA", "ECDH", "SM2", "ECKCDSA", "ECGDSA", "GOST-34.10",
"DH", "DSA", "ElGamal", "Ed25519", "Curve25519", "NEWHOPE", "McEliece",
"RSA", "RSA_keygen", "XMSS", "ec_h2c"]
output = test_cli("speed", ["--msec=%d" % (msec)] + pk_algos, None).split('\n')
# ECDSA-secp256r1 106 keygen/sec; 9.35 ms/op 37489733 cycles/op (1 op in 9 ms)
format_re = re.compile(r'^.* [0-9]+ ([A-Za-z0-9 ]+)/sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9\.]+ ms\)')
for line in output:
if format_re.match(line) is None:
logging.error("Unexpected line %s", line)
def cli_speed_pbkdf_tests(_tmp_dir):
msec = 1
pbkdf_ops = ['bcrypt', 'passhash9', 'argon2']
format_re = re.compile(r'^.* [0-9]+ /sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9]+(\.[0-9]+)? ms\)')
for op in pbkdf_ops:
output = test_cli("speed", ["--msec=%d" % (msec), op], None).split('\n')
for line in output:
if format_re.match(line) is None:
logging.error("Unexpected line %s", line)
def cli_speed_table_tests(_tmp_dir):
msec = 1
version_re = re.compile(r'^Botan 3\.[0-9]+\.[0-9](\-.*[0-9]+)? \(.*, revision .*, distribution .*\)')
cpuid_re = re.compile(r'^CPUID: [a-z_0-9 ]*$')
format_re = re.compile(r'^AES-128 .* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)')
tbl_hdr_re = re.compile(r'^algo +operation +1024 bytes$')
tbl_val_re = re.compile(r'^AES-128 +(encrypt|decrypt) +[0-9]+(\.[0-9]{2})$')
output = test_cli("speed", ["--format=table", "--provider=base", "--msec=%d" % (msec), "AES-128"], None).split('\n')
if len(output) != 11:
logging.error('Unexpected number of lines from table output')
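    # Expected layout: version line, optional CPUID line, blank line, two raw speed
    # lines, blank line, table header, two table rows, blank line, trailing note.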
if version_re.match(output[0]) is None:
logging.error("Unexpected version line %s", output[0])
if output[1] != '':
if cpuid_re.match(output[1]) is None:
logging.error("Unexpected cpuid line %s", output[1])
elif output[2] != '':
logging.error("Expected newline got %s", output[2])
if format_re.match(output[3]) is None:
logging.error("Unexpected line %s", output[3])
if format_re.match(output[4]) is None:
logging.error("Unexpected line %s", output[4])
if output[5] != '':
logging.error("Expected newline got %s", output[5])
if tbl_hdr_re.match(output[6]) is None:
logging.error("Unexpected table header %s", output[6])
if tbl_val_re.match(output[7]) is None:
logging.error("Unexpected table header %s", output[7])
if tbl_val_re.match(output[8]) is None:
logging.error("Unexpected table header %s", output[8])
if output[9] != '':
logging.error("Expected newline got %s", output[9])
if output[10].find('results are the number of 1000s bytes processed per second') < 0:
logging.error("Unexpected trailing message got %s", output[10])
def cli_speed_invalid_option_tests(_tmp_dir):
speed_usage = "Usage: speed --msec=500 --format=default --ecc-groups= --provider= --buf-size=1024 --clear-cpuid= --cpu-clock-speed=0 --cpu-clock-ratio=1.0 *algos"
test_cli("speed", ["--buf-size=0", "--msec=1", "AES-128"],
expected_stderr="Usage error: Cannot have a zero-sized buffer\n%s" % (speed_usage))
test_cli("speed", ["--buf-size=F00F", "--msec=1", "AES-128"],
expected_stderr="Usage error: Invalid integer value 'F00F' for option buf-size\n%s" % (speed_usage))
test_cli("speed", ["--buf-size=90000000", "--msec=1", "AES-128"],
expected_stderr="Usage error: Specified buffer size is too large\n%s" % (speed_usage))
test_cli("speed", ["--clear-cpuid=goku", "--msec=1", "AES-128"],
expected_stderr="Warning don't know CPUID flag 'goku'")
def cli_speed_math_tests(_tmp_dir):
msec = 1
# these all have a common output format
math_ops = ['mp_mul', 'mp_div', 'mp_div10', 'modexp', 'random_prime', 'inverse_mod',
'rfc3394', 'fpe_fe1', 'ecdsa_recovery', 'ecc_init', 'poly_dbl',
'bn_redc', 'nistp_redc', 'ecc_mult', 'ecc_ops', 'os2ecp', 'primality_test']
format_re = re.compile(r'^.* [0-9]+ /sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9]+(\.[0-9]+)? ms\)')
for op in math_ops:
output = test_cli("speed", ["--msec=%d" % (msec), op], None).split('\n')
for line in output:
if format_re.match(line) is None:
logging.error("Unexpected line %s", line)
def cli_speed_tests(_tmp_dir):
# pylint: disable=too-many-branches
msec = 1
output = test_cli("speed", ["--msec=%d" % (msec), "--buf-size=64,512", "AES-128"], None).split('\n')
if len(output) % 4 != 0:
logging.error("Unexpected number of lines for AES-128 speed test")
# pylint: disable=line-too-long
format_re = re.compile(r'^AES-128 .* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)')
for line in output:
if format_re.match(line) is None:
logging.error("Unexpected line %s", line)
output = test_cli("speed", ["--msec=%d" % (msec), "ChaCha20", "SHA-256", "HMAC(SHA-256)"], None).split('\n')
# pylint: disable=line-too-long
format_re = re.compile(r'^.* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)')
for line in output:
if format_re.match(line) is None:
logging.error("Unexpected line %s", line)
output = test_cli("speed", ["--msec=%d" % (msec), "AES-128/GCM"], None).split('\n')
format_re_ks = re.compile(r'^AES-128/GCM\(16\).* [0-9]+ key schedule/sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9\.]+ ms\)')
format_re_cipher = re.compile(r'^AES-128/GCM\(16\) .* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)')
for line in output:
if format_re_ks.match(line) is None:
if format_re_cipher.match(line) is None:
logging.error('Unexpected line %s', line)
output = test_cli("speed", ["--msec=%d" % (msec), "scrypt"], None).split('\n')
format_re = re.compile(r'^scrypt-[0-9]+-[0-9]+-[0-9]+ \([0-9]+ MiB\) [0-9]+ /sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9\.]+ ms\)')
for line in output:
if format_re.match(line) is None:
logging.error("Unexpected line %s", line)
output = test_cli("speed", ["--msec=%d" % (msec), "RNG"], None).split('\n')
# ChaCha_RNG generate buffer size 1024 bytes: 954.431 MiB/sec 4.01 cycles/byte (477.22 MiB in 500.00 ms)
format_re = re.compile(r'^.* generate buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms')
for line in output:
if format_re.match(line) is None:
logging.error("Unexpected line %s", line)
# Entropy source rdseed output 128 bytes estimated entropy 0 in 0.02168 ms total samples 32
output = test_cli("speed", ["--msec=%d" % (msec), "entropy"], None).split('\n')
format_re = re.compile(r'^Entropy source [_a-z0-9]+ output [0-9]+ bytes estimated entropy [0-9]+ in [0-9]+\.[0-9]+ ms .*total samples [0-9]+')
for line in output:
if format_re.match(line) is None:
logging.error("Unexpected line %s", line)
output = test_cli("speed", ["--msec=%d" % (msec), "zfec"], None).split('\n')
format_re = re.compile(r'^zfec [0-9]+/[0-9]+ (encode|decode) buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms')
for line in output:
if format_re.match(line) is None:
logging.error("Unexpected line %s", line)
output = test_cli("speed", ["--msec=%d" % (msec), "--format=json", "AES-128"], None)
json_blob = json.loads(output)
if len(json_blob) < 2:
logging.error("Unexpected size for JSON output")
for b in json_blob:
for field in ['algo', 'op', 'events', 'bps', 'buf_size', 'nanos']:
if field not in b:
logging.error('Missing field %s in JSON record %s', field, b)
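    # For reference, a single record in the --format=json output is expected to
    # look roughly like this (the values below are made up for illustration):
    #
    #     {"algo": "AES-128", "op": "encrypt", "events": 250000,
    #      "bps": 512000000, "buf_size": 1024, "nanos": 1000000000}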
def run_test(fn_name, fn):
start = time.time()
tmp_dir = tempfile.mkdtemp(prefix='botan_cli_')
try:
fn(tmp_dir)
except Exception as e: # pylint: disable=broad-except
logging.error("Test %s threw exception: %s", fn_name, e)
shutil.rmtree(tmp_dir)
end = time.time()
logging.info("Ran %s in %.02f sec", fn_name, end-start)
def main(args=None):
# pylint: disable=too-many-branches,too-many-locals
if args is None:
args = sys.argv
parser = optparse.OptionParser(
formatter=optparse.IndentedHelpFormatter(max_help_position=50))
parser.add_option('--verbose', action='store_true', default=False)
parser.add_option('--quiet', action='store_true', default=False)
parser.add_option('--threads', action='store', type='int', default=0)
parser.add_option('--run-slow-tests', action='store_true', default=False)
(options, args) = parser.parse_args(args)
setup_logging(options)
if len(args) < 2:
logging.error("Usage: %s path_to_botan_cli [test_regex]", args[0])
return 1
if not os.access(args[1], os.X_OK):
logging.error("Could not access/execute %s", args[1])
return 2
threads = options.threads
if threads == 0:
threads = multiprocessing.cpu_count()
global CLI_PATH
CLI_PATH = args[1]
test_regex = None
if len(args) == 3:
try:
test_regex = re.compile(args[2])
except re.error as e:
logging.error("Invalid regex: %s", str(e))
return 1
slow_test_fns = [
cli_speed_tests,
cli_speed_pk_tests,
cli_speed_math_tests,
cli_speed_pbkdf_tests,
cli_speed_table_tests,
cli_speed_invalid_option_tests,
cli_xmss_sign_tests,
]
fast_test_fns = [
cli_argon2_tests,
cli_asn1_tests,
cli_base32_tests,
cli_base58_tests,
cli_base64_tests,
cli_bcrypt_tests,
cli_cc_enc_tests,
cli_cycle_counter,
cli_cert_issuance_tests,
cli_compress_tests,
cli_config_tests,
cli_cpuid_tests,
cli_dl_group_info_tests,
cli_ec_group_info_tests,
cli_entropy_tests,
cli_factor_tests,
cli_gen_dl_group_tests,
cli_gen_prime_tests,
cli_hash_tests,
cli_help_tests,
cli_hex_tests,
cli_hmac_tests,
cli_is_prime_tests,
cli_key_tests,
cli_mod_inverse_tests,
cli_pbkdf_tune_tests,
cli_pk_encrypt_tests,
cli_pk_workfactor_tests,
cli_psk_db_tests,
cli_rng_tests,
cli_roughtime_check_tests,
cli_roughtime_tests,
cli_timing_test_tests,
cli_tls_ciphersuite_tests,
cli_tls_client_hello_tests,
cli_tls_http_server_tests,
cli_tls_proxy_tests,
cli_tls_socket_tests,
cli_trust_root_tests,
cli_tss_tests,
cli_uuid_tests,
cli_version_tests,
cli_zfec_tests,
]
test_fns = []
if options.run_slow_tests:
test_fns = slow_test_fns + fast_test_fns
else:
test_fns = fast_test_fns
tests_to_run = []
for fn in test_fns:
fn_name = fn.__name__
if test_regex is None or test_regex.search(fn_name) is not None:
tests_to_run.append((fn_name, fn))
start_time = time.time()
if threads > 1:
pool = ThreadPool(processes=threads)
results = []
for test in tests_to_run:
results.append(pool.apply_async(run_test, test))
for result in results:
result.get()
else:
for test in tests_to_run:
run_test(test[0], test[1])
end_time = time.time()
print("Ran %d tests with %d failures in %.02f seconds" % (
TESTS_RUN, TESTS_FAILED, end_time - start_time))
if TESTS_FAILED > 0:
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
trello-hipchat.py
|
"""
Simple Flask Webapp to receive Trello Webhook API callbacks and Post to
a HipChat room.
Copyright 2013 Valentin v. Seggern <valentin.vonseggern@telekom.de>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
import hmac
from hashlib import sha1
from base64 import b64encode
from ConfigParser import ConfigParser
from threading import Thread
from flask import Flask
from flask import request
from werkzeug.exceptions import Unauthorized
import hipchat
import requests
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
config = ConfigParser()
config.read('trello-hipchat.cfg')
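# An example trello-hipchat.cfg layout (all values below are placeholders),
# built from the config keys read throughout this module:
#
#     [trello]
#     key = <trello-api-key>
#     token = <trello-api-token>
#     secret = <trello-app-secret>
#     board_id = <board-id>
#     list_name_todo = ToDo
#     list_name_progress = In Progress
#     list_name_review = Review
#     list_name_done = Done
#     list_name_bugtracker = Bugs
#
#     [integration]
#     callback_url = https://example.com/board_modified
#
#     [hipchat]
#     token = <hipchat-api-token>
#     room = <room-name>
#     sender = TrelloBot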
@app.route('/board_modified', methods=['POST', 'HEAD'])
def board_modified():
"""
    This is the callback handler that is called by Trello's webhook API.
The easiest way to stop callbacks is to "return 'gone', 410" from this method :-)
"""
    if request.method == 'HEAD':
logger.debug('Trello is checking us out... Well - hello there! %s', request.data)
return "Welcome dear Trello..."
    # For some reason the registration HEAD request does not come with a verification header,
    # so we only verify POST requests.
try:
verify_request()
except Unauthorized, e:
logger.warning('Request could not be authenticated. Possible attack event. Headers: %s. Data: %s',
request.headers, request.data)
raise e
payload = json.loads(request.data)
event_type = payload['action']['type']
if event_type == 'updateCard' or event_type == 'createCard':
handle_card_update(payload['action'])
return 'Thanks, Trello...'
def verify_request():
"""
    Verifies Trello requests using the HMAC mechanism described in the Trello docs. This
    makes sure that the callbacks really come from our Trello board. If verification
    fails, this method raises Unauthorized().
"""
secret = config.get('trello', 'secret')
cb = config.get('integration', 'callback_url')
raw = request.data + cb
mac = hmac.new(secret, raw, sha1)
h = b64encode(mac.digest()).lower()
request_header = request.headers.get('x-trello-webhook', 'no such header').lower()
if h != request_header:
raise Unauthorized()
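# Sketch of the digest computation above with hypothetical values, useful when
# debugging verification failures (Trello signs the request body + callback URL):
#
#     >>> mac = hmac.new('app-secret', '{"action": {}}' + 'https://example.com/board_modified', sha1)
#     >>> expected = b64encode(mac.digest()).lower()  # compared against X-Trello-Webhook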
def handle_card_update(action):
logger.info('handling card update: %s', json.dumps(action, indent=4))
parsed = parse(action)
if parsed:
notify_hipchat('%(name)s just %(action)s %(item)s' % (parsed))
def parse(action):
"""
Parses the trello request into a dict with action (a sentence), name and item.
Returns this parsed structure or None, if the trello request could not be parsed.
"""
try:
if action['type'] == 'createCard':
list_after = action['data']['list']['name']
else:
list_after = action['data']['listAfter']['name']
parsed = {}
logger.debug('card in list %s.', list_after)
if list_after == get_list_name('list_name_todo'):
parsed['action'] = 'put back'
elif list_after == get_list_name('list_name_progress'):
parsed['action'] = 'started working on'
elif list_after == get_list_name('list_name_review'):
            parsed['action'] = 'finished coding'
elif list_after == get_list_name('list_name_done'):
parsed['action'] = 'finished'
elif list_after == get_list_name('list_name_bugtracker'):
            parsed['action'] = 'created a new bug:'
else:
parsed['action'] = 'used unconfigured list %s' % list_after
parsed['name'] = action['memberCreator']['fullName']
parsed['item'] = action['data']['card']['name']
return parsed
except KeyError, e:
logger.debug("""Got a KeyError (%s) while parsing request from Trello.
Probably this was not a move card event...""", e)
def get_list_name(config_name):
"""Return the trello list name"""
return config.get('trello', config_name)
def notify_hipchat(msg):
logger.debug('Sending "%s" to hipchat' % msg)
hipster = hipchat.HipChat(token=config.get('hipchat', 'token'))
hipster.message_room(config.get('hipchat', 'room'), config.get('hipchat', 'sender'), msg)
@app.before_first_request
def init():
# Run the init in another thread so that the other endpoints can answer.
Thread(target=register_at_trello).start()
logger.debug('Configured boards: %s -> %s -> %s -> %s -> %s',
get_list_name('list_name_todo'),
get_list_name('list_name_progress'),
get_list_name('list_name_review'),
get_list_name('list_name_done'),
get_list_name('list_name_bugtracker'))
def register_at_trello():
create_webhook = {
'idModel': config.get('trello', 'board_id'),
'callbackURL': config.get('integration', 'callback_url')
}
headers = {'content-type': 'application/json'}
endpoint = 'https://api.trello.com/1/token/%s/webhooks?key=%s' % \
(config.get('trello', 'token'), config.get('trello', 'key'))
payload = json.dumps(create_webhook, indent=4)
logger.debug('Posting to %s: %s', endpoint, payload)
resp = requests.put(endpoint, data=payload, headers=headers)
if resp.status_code == 200:
logger.info('GREAT SUCCESS... Registering webhook at trello worked.')
return True
logger.error('Failed to register at trello with HTTP %s: %s', resp.status_code, resp.text)
return False
|
worker.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Android code execution worker.
A deployment will have n >= 1 worker machines. This worker runs on each of them,
along with server.py. server.py is a webserver; this script handles all Android
operations (build, execution, etc.). It also handles setup and configuration of
the worker machine's environment.
Currently n > 1 workers are not supported.
This is a proof-of-concept implementation and it has many shortcomings:
1. Full support for multiple running emulators is not done yet, and attempting
to use different emulators will cause hangs. For now, the fix is to use the
same emulator settings for each entry in runtimes/config.json, which limits
max concurrent requests to 1, and we don't do any locking so requests running
at the same time will race. Long term fix is to delegate the emulator name
and port to all commands, including gradle, then to make the server
multithreaded and to dispatch concurrent requests to different emulators.
2. Starting emulators happens serially, which is very slow, and we should
parallelize this.
3. Only 64-bit Linux is currently supported, and only running the 32-bit
Android toolchain. This is weird; the reason is that the 64-bit toolchain
requires x86 emulation, which in turn requires KVM support.
4. We run Android code in a VM, so the host machine must support nested VMs or
performance will be abysmal. The fix for this, and for the 64-bit issue, is
to run under KVM.
5. Output in the success case is a screenshot. Ideally, it would be an
interactive emulator session.
6. We do a full compile, apk load, and test run on each test invocation. This
takes ~30s when running natively and ~45s under emulation. This could be
improved substantially by being more incremental, and the emulation penalty
could be decreased with KVM.
7. The test patch implementation assumes only one file is being edited. It could
be trivially extended to support n >= 0 patches.
8. In headless mode we still rely on the shell having a DISPLAY var set and we
die if it is missing. We should remove this dependency and only require
DISPLAY when headed.
Steps for running a worker on AWS:
1. Launch Ubuntu Server 14.04 LTS 64-bit (ami-3d50120d) via
http://console.aws.amazon.com. Set type to something with 40+ GB of storage.
You may wish to use other storage options (e.g. for durability between
instance restarts) in which case you will need to mount them to /mnt or use a
different mount point in step 5, below.
2. Create a security group permitting custom TCP traffic on port 8080 and source
Anywhere. Apply it to your instance.
3. ssh -X -i <key.pem> ubuntu@<public_dns> where
* key.pem is the key file you downloaded from AWS
* public_dns is the Public DNS of the instance in the EC2 console.
Be sure to use -X; this sets the shell's DISPLAY var correctly.
4. sudo dpkg --add-architecture i386 && \
sudo apt-get update && \
sudo apt-get install \
git \
libgcc1:i386 \
lib32ncurses5 \
lib32stdc++6 \
lib32z1 \
openjdk-7-jdk \
openjdk-7-jre \
unzip
5. sudo mkdir -p /usr/local/cacm && \
sudo chown ubuntu /usr/local/cacm && \
cd /usr/local/cacm
6. git clone https://github.com/google/coursebuilder-android-container-module \
&& cd coursebuilder-android-container-module
7. python android/worker.py
8. sh android/get_aws_public_hostname.sh
9. python android/server.py --host $(cat android/.aws_public_hostname)
Your worker can now be reached by a FE for REST operations. If you want to put
it behind a balancer like ELB, do an HTTP health check against /health and
expect a 200 if the instance can accept requests to start running new jobs.
Determining the number of workers you need is straightforward: each worker can
handle many concurrent requests for past results, but only one request at a time
for executing a new job.
"""
import argparse
import base64
import datetime
import json
import logging
import md5
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import time
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
_ACCEPT_LICENSE_NEEDLE = 'Do you accept the license'
_ANDROID_HOME = 'ANDROID_HOME'
_ANDROID_SDK_HOME = 'ANDROID_SDK_HOME'
_BOOT_ANIMATION_STOPPED = 'stopped\r'
_BOOT_ANIMATION_PROPERTY = 'init.svc.bootanim'
_CLEAN_ALL = 'all'
_CLEAN_EMULATORS = 'emulators'
_CLEAN_LOCAL = 'local'
_CLEAN_PYC = 'pyc'
_CLEAN_RESOURCES = 'resources'
_CLEAN_RESULTS = 'results'
_CLEAN_RUNTIMES = 'runtimes'
_CLEAN_CHOICES = [
_CLEAN_ALL,
_CLEAN_EMULATORS,
_CLEAN_LOCAL, # All but resources.
_CLEAN_PYC,
_CLEAN_RESOURCES,
_CLEAN_RESULTS,
_CLEAN_RUNTIMES,
]
_DISPLAY = 'DISPLAY'
_EMULATOR = 'emulator'
_GRADLEW_INSTALL_SUCCESS_NEEDLE = 'BUILD SUCCESSFUL'
LOG_DEBUG = 'DEBUG'
LOG_ERROR = 'ERROR'
LOG_INFO = 'INFO'
LOG_WARNING = 'WARNING'
LOG_LEVEL_CHOICES = [
LOG_DEBUG,
LOG_ERROR,
LOG_INFO,
LOG_WARNING,
]
_LOG = logging.getLogger('android.worker')
_TEST_FAILURE_NEEDLE = 'FAILURES!!!\r'
_PROJECTS_PATH = os.path.join(ROOT_PATH, 'projects')
_PROJECTS_CONFIG = os.path.join(_PROJECTS_PATH, 'config.json')
_RESOURCES_PATH = os.path.join(ROOT_PATH, 'resources')
_RESOURCES_TMP_PATH = os.path.join(_RESOURCES_PATH, 'tmp')
_RESULT_IMAGE_NAME = 'result.jpg'
_RESULT_JSON_NAME = 'result.json'
_RESULTS_PATH = os.path.join(ROOT_PATH, 'results')
_RESULTS_TTL_SEC = 60 * 30
_RUNTIMES_PATH = os.path.join(ROOT_PATH, 'runtimes')
_RUNTIMES_CONFIG = os.path.join(_RUNTIMES_PATH, 'config.json')
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--clean', type=str, choices=_CLEAN_CHOICES,
help='Remove entities created by worker.py')
_PARSER.add_argument(
'--log_level', type=str, choices=LOG_LEVEL_CHOICES, default=LOG_INFO,
help='Display log messages at or above this level')
_PARSER.add_argument(
'--show_emulator', action='store_true',
help='Pass to display the emulator rather than run it headless')
_PARSER.add_argument(
'--stop', action='store_true', help='Stop running emulators')
_PARSER.add_argument(
'--test', type=str, help='Name of the project to run the tests for')
def configure_logger(log_level, log_file=None):
logging.basicConfig(filename=log_file, level=log_level)
def main(args):
configure_logger(args.log_level)
config = Config.load()
if args.clean:
_clean(args.clean, config.projects, config.runtimes)
elif args.stop:
_stop(config.runtimes)
elif args.test:
_test(
args.test, config.projects.get(args.test),
config.runtimes.get(args.test), strict=True)
else:
_ensure_resources_dirs_exist()
_ensure_sdk_installed()
_ensure_runtimes_exist(config.runtimes)
_ensure_projects_exist(config.projects)
_build_all(config.projects) # Warm the build.
_ensure_emulators_running_and_ready(
config.runtimes, headless=not args.show_emulator)
_install_packages(config.projects)
def fork_test(config, project_name, ticket, patches=None):
# Runs a test in a fork; returns PID if test starts else None.
child = multiprocessing.Process(
target=run_test, args=(config, project_name, ticket),
kwargs={'patches': patches})
child.daemon = True
child.start()
return child.pid
def run_test(config, project_name, ticket, patches=None):
patches = patches if patches else []
test_env = _TestEnvironment(ticket)
test_env.set_up() # All exit points from this fn must call tear_down().
test_run = TestRun()
if not patches:
return _run_test_failure(
test_env, test_run, ticket, 'Must specify test patches',
TestRun.CONTENTS_MALFORMED)
src_project = config.get_project(project_name)
if not src_project:
return _run_test_failure(
test_env, test_run, ticket,
'Unable to find project named ' + project_name,
TestRun.PROJECT_MISCONFIGURED)
runtime = config.get_runtime(project_name)
if not runtime:
return _run_test_failure(
test_env, test_run, ticket,
'Unable to find runtime for project named ' + project_name,
TestRun.RUNTIME_MISCONFIGURED)
try:
Lock.get(ticket)
test_env.set_up_projects(patches, src_project)
_LOG.info('Begin test run of project ' + test_env.test_project.name)
test_run = TestRun()
test_run.set_status(TestRun.TESTS_RUNNING)
test_env.save(test_run)
test_run = _test(
test_env.test_project.name, test_env.test_project, runtime,
strict=False)
_LOG.info('End test run of project ' + test_env.test_project.name)
test_env.save(test_run)
return ticket
except LockError:
return _run_test_failure(
test_env, test_run, ticket, 'Worker busy', TestRun.UNAVAILABLE)
finally:
test_env.tear_down()
# Since we unlock after tear_down, which restores the logger, result dir
# logs will not contain an entry for the lock release. However, the main
# server log will.
Lock.release()
def _run_test_failure(test_env, test_run, ticket, payload, status):
test_run.set_payload(payload)
test_run.set_status(status)
test_env.save(test_run)
test_env.tear_down()
return ticket
class Config(object):
def __init__(self, projects, runtimes):
self.projects = projects
self.runtimes = runtimes
def get_project(self, name):
return self.projects.get(name)
def get_runtime(self, project_name):
return self.runtimes.get(project_name)
@classmethod
def load(cls):
projects = _read_json(_PROJECTS_CONFIG)
runtimes = _read_json(_RUNTIMES_CONFIG)
return cls(
{k: _Project.from_config(k, v) for k, v in projects.iteritems()},
{k: _Runtime.from_config(k, v) for k, v in runtimes.iteritems()})
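# For reference, the two config files loaded above are assumed to be plain JSON
# objects keyed by project name. A hypothetical minimal example, using only the
# keys consumed by _Project.from_config and _Runtime.from_config below:
#
#     projects/config.json:
#         {"myapp": {"editorFile": "src/main/java/Main.java",
#                    "package": "com.example.myapp",
#                    "testClass": "com.example.myapp.test.MainTest",
#                    "testPackage": "com.example.myapp.test"}}
#
#     runtimes/config.json:
#         {"myapp": {"avd": "myapp_avd", "port": 5554,
#                    "sdcard": "sdcard.img", "sdcardSize": 64}}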
class Error(Exception):
"""Base error class."""
class LockError(Error):
"""Raised when a lock operation fails."""
class Lock(object):
"""Persistent lock to prevent concurrent requests on one worker."""
_PATH = os.path.join(ROOT_PATH, '.lock')
def __init__(self):
super(Lock, self).__init__()
assert False, 'Instantiation not supported'
@classmethod
def active(cls):
return os.path.exists(cls._PATH)
@classmethod
def get(cls, ticket):
if cls.active():
raise LockError('Lock already active')
contents = str(ticket)
with open(cls._PATH, 'w') as f:
f.write(contents)
_LOG.info('Acquired execution lock with ticket ' + contents)
@classmethod
def release(cls):
if not cls.active():
raise LockError('Lock not active')
contents = str(cls.value())
os.remove(cls._PATH)
_LOG.info('Released execution lock with ticket ' + contents)
@classmethod
def value(cls):
if not cls.active():
return None
with open(cls._PATH) as f:
return f.read().strip()
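    # Typical usage (mirroring run_test() above): acquire with a ticket, do the
    # exclusive work, and always release in a finally block.
    #
    #     Lock.get(ticket)
    #     try:
    #         ...  # run the build/test
    #     finally:
    #         Lock.release()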
class Patch(object):
def __init__(self, filename, contents):
self.contents = contents
self.filename = filename
class TestRun(object):
BUILD_FAILED = 'build_failed'
BUILD_SUCCEEDED = 'build_succeeded'
CONTENTS_MALFORMED = 'contents_malformed'
NOT_FOUND = 'not_found'
PROJECT_MISCONFIGURED = 'project_misconfigured'
RUNTIME_MISCONFIGURED = 'runtime_misconfigured'
RUNTIME_NOT_RUNNING = 'runtime_not_running'
TESTS_FAILED = 'tests_failed'
TESTS_RUNNING = 'tests_running'
TESTS_SUCCEEDED = 'tests_succeeded'
UNAVAILABLE = 'unavailable'
STATUSES = frozenset((
BUILD_FAILED,
BUILD_SUCCEEDED,
CONTENTS_MALFORMED,
NOT_FOUND,
PROJECT_MISCONFIGURED,
RUNTIME_MISCONFIGURED,
RUNTIME_NOT_RUNNING,
TESTS_FAILED,
TESTS_RUNNING,
TESTS_SUCCEEDED,
UNAVAILABLE,
))
def __init__(self):
self._payload = None
self._status = None
def get_payload(self):
return self._payload
def get_status(self):
return self._status
def set_payload(self, value):
self._payload = value
def set_status(self, value):
if value not in self.STATUSES:
raise ValueError(
'Value %s invalid; choices are %s' % (
value, ', '.join(sorted(self.STATUSES))))
self._status = value
def to_dict(self):
return {
'payload': self.get_payload(),
'status': self.get_status(),
}
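    # to_dict() is what _TestEnvironment.save() serializes to result.json, e.g.
    # (hypothetical values):
    #
    #     {"payload": "<base64 screenshot or error text>",
    #      "status": "tests_succeeded"}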
def _build_all(projects):
for project in projects.values():
project.build()
def _clean(clean, projects, runtimes):
# Emulators depend on projects and runtimes; do them first.
if clean in (_CLEAN_ALL, _CLEAN_EMULATORS, _CLEAN_LOCAL):
        _clean_emulators(projects)
if clean in (_CLEAN_ALL, _CLEAN_LOCAL, _CLEAN_RESULTS):
_clean_results()
if clean in (_CLEAN_ALL, _CLEAN_LOCAL, _CLEAN_RUNTIMES):
_clean_runtimes(runtimes)
# We can clean most accurately if we still have the SDK, so save cleaning it
# up for the end.
if clean in (_CLEAN_ALL, _CLEAN_RESOURCES):
_clean_resources()
# Finally, .pyc files because they could be created by other cleanup code.
if clean in (_CLEAN_ALL, _CLEAN_LOCAL, _CLEAN_PYC):
_clean_pyc()
def _clean_emulators(projects, strict=False):
for project in projects.values():
project.uninstall(strict=strict)
def _clean_pyc():
count = 0
for root, _, files in os.walk(ROOT_PATH):
for path in files:
if os.path.splitext(path)[1] == '.pyc':
os.remove(os.path.join(root, path))
count += 1
_LOG.info('Removed %s .pyc file%s', count, 's' if count != 1 else '')
def _clean_results():
if os.path.exists(_RESULTS_PATH):
shutil.rmtree(_RESULTS_PATH)
_LOG.info('Removed results directory %s', _RESULTS_PATH)
def _clean_resources():
if os.path.exists(_RESOURCES_PATH):
shutil.rmtree(_RESOURCES_PATH)
_LOG.info('Removed resources directory %s', _RESOURCES_PATH)
def _clean_runtimes(runtimes):
for runtime in runtimes.values():
runtime.clean()
def _die(message):
_LOG.critical(message)
sys.exit(1)
def _ensure_emulators_running_and_ready(runtimes, headless=True):
# TODO(johncox): serial and slow; parallelize if we have many runtimes.
for runtime in runtimes.values():
if runtime.ready():
_LOG.info(
'Emulator for runtime %s already ready on port %s; reusing',
runtime.project_name, runtime.port)
else:
runtime.start(headless=headless)
_LOG.info(
'Emulator for runtime %s not ready; waiting',
runtime.project_name)
runtime.block_until_ready()
_LOG.info('Runtime %s emulator ready', runtime.project_name)
def _ensure_projects_exist(projects):
for project in projects.values():
if not project.exists():
            _die(
                'Project %s does not exist at %s; aborting' % (
                    project.name, project.path))
def _ensure_runtimes_exist(runtimes):
for runtime in runtimes.values():
if runtime.exists():
_LOG.info(
'Runtime %s exists at %s; skipping', runtime.project_name,
runtime.path)
else:
_LOG.info(
'Runtime %s missing or in inconsistent state; re-creating',
runtime.project_name)
runtime.clean()
runtime.create()
def _ensure_resources_dirs_exist():
if not os.path.exists(_RESOURCES_PATH):
os.mkdir(_RESOURCES_PATH)
_LOG.info('Created resources directory %s', _RESOURCES_PATH)
else:
_LOG.info('Using existing resources directory %s', _RESOURCES_PATH)
if not os.path.exists(_RESOURCES_TMP_PATH):
os.mkdir(_RESOURCES_TMP_PATH)
_LOG.info('Created resources temp directory %s', _RESOURCES_TMP_PATH)
else:
_LOG.info(
'Using existing resources temp directory %s', _RESOURCES_TMP_PATH)
def _ensure_sdk_installed():
if not _Sdk.is_installed():
_Sdk.install()
else:
_LOG.info('Using existing SDK at %s', _Sdk.PATH)
def _get_fingerprint(value):
return md5.new(value).hexdigest()
def _get_project_runtime_iter(projects, runtimes):
"""Gets iterator over (project, runtime) pairs ordered by project name."""
assert len(projects) == len(runtimes)
projects = sorted(projects.values(), key=lambda p: p.name)
runtimes = sorted(runtimes.values(), key=lambda r: r.project_name)
return ((project, runtime) for project, runtime in zip(projects, runtimes))
def _get_strict_handler(strict):
return _die if strict else _LOG.info
def _install_packages(projects):
for project in projects.values():
project.install()
def _read_json(path):
with open(path) as f:
try:
return json.loads(f.read())
except: # Treat all errors the same. pylint: disable=bare-except
_LOG.error(
'Unable to load json from %s; file broken or json malformed',
path)
def _run(command_line, cwd=None, env=None, proc_fn=None, strict=True):
env = env if env is not None else {}
result = []
_LOG.debug('Running command: ' + ' '.join(command_line))
proc = subprocess.Popen(
command_line, cwd=cwd, env=env, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
if proc_fn:
proc_fn(proc)
got_stdout, got_stderr = proc.communicate()
if got_stdout:
for line in got_stdout.split('\n'):
result.append(line)
if got_stderr:
for line in got_stderr.split('\n'):
result.append(line)
if proc.returncode != 0 and strict:
_die(
'Error running command "%s":\n%s' % (
' '.join(command_line), '\n'.join(result)))
return proc.returncode, result
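# Example use of the helper above (the command is arbitrary). It returns the
# exit code plus the combined stdout/stderr lines; with strict=True a non-zero
# exit status calls _die() and terminates the worker.
#
#     code, lines = _run(['/bin/echo', 'hello'], strict=False)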
def _stop(runtimes):
for runtime in runtimes.values():
runtime.stop()
def _test(name, project, runtime, strict=False):
"""Run a project's tests, either under worker.py or under a web caller."""
handler = _get_strict_handler(strict)
test_run = TestRun()
if not project:
handler('Unable to find project named %s; aborting' % name)
test_run.set_status(TestRun.PROJECT_MISCONFIGURED)
return test_run
if not runtime:
handler('Unable to find runtime named %s; aborting' % name)
test_run.set_status(TestRun.RUNTIME_MISCONFIGURED)
return test_run
if not runtime.ready():
handler('Runtime %s not running; aborting' % name)
test_run.set_status(TestRun.RUNTIME_NOT_RUNNING)
return test_run
build_succeeded, build_result = project.install()
if not build_succeeded:
test_run.set_status(TestRun.BUILD_FAILED)
test_run.set_payload('\n'.join(build_result))
return test_run
test_run.set_status(TestRun.BUILD_SUCCEEDED)
test_succeeded, test_result = project.test()
if not test_succeeded:
test_run.set_status(TestRun.TESTS_FAILED)
test_run.set_payload('\n'.join(test_result))
return test_run
test_run.set_status(TestRun.TESTS_SUCCEEDED)
test_run.set_payload(test_result)
_LOG.info('Tests succeeded for project %s', name)
return test_run
class _Project(object):
def __init__(
self, name, editor_file, package, path, test_class, test_package):
self.editor_file = editor_file
self.name = name
self.package = package
self.path = path
self.test_class = test_class
self.test_package = test_package
@classmethod
def from_config(cls, key, value):
return cls(
key, os.path.join(_PROJECTS_PATH, key, value['editorFile']),
value['package'], os.path.join(_PROJECTS_PATH, key),
value['testClass'], value['testPackage'])
def build(self, strict=False):
handler = _get_strict_handler(strict)
code, result = _run(
[self._get_gradlew(), 'build'], cwd=self.path,
env=_Sdk.get_shell_env(), strict=False)
if code:
handler(
'Build for project %s failed; result: %s' % (
self.name, '\n'.join(result)))
return False, result
_LOG.info('Project %s built', self.name)
return True, result
def exists(self):
return os.path.exists(self.path)
def install(self):
"""Install packages under worker.py and external callers."""
_, result = _run(
[self._get_gradlew(), 'installDebug'], cwd=self.path,
env=_Sdk.get_shell_env(), strict=False)
if self._gradlew_failed(result):
message = (
'Unable to build and install debug package from Project %s; '
'error:\n%s') % (self.name, '\n'.join(result))
_LOG.error(message)
return False, result
else:
_LOG.info('Installed debug package from Project %s', self.name)
_, result = _run(
[self._get_gradlew(), 'installDebugTest'], cwd=self.path,
env=_Sdk.get_shell_env(), strict=False)
if self._gradlew_failed(result):
message = (
'Unable to build and install debug test package from Project '
'%s; error:\n%s') % (self.name, '\n'.join(result))
_LOG.error(message)
return False, result
else:
_LOG.info('Installed debug test package from Project %s', self.name)
return (
True,
[('Debug and test debug packages installed from Project '
'%s') % self.name])
def patch(self, patch):
"""Apply a patch to the project's filesystem."""
if not os.path.exists(patch.filename):
_die('Unable to apply patch; no file named ' + patch.filename)
with open(patch.filename, 'w') as f:
f.write(patch.contents)
_LOG.debug(
'Patched file %s with contents fingerprint %s',
patch.filename, _get_fingerprint(patch.contents))
def test(self):
"""Runs tests under worker.py and external callers."""
_, result = _run([
_Sdk.get_adb(), 'shell', 'am', 'instrument', '-w', '-e', 'class',
self.test_class,
'%s/android.test.InstrumentationTestRunner' % self.test_package])
if self._tests_failed(result):
message = 'Tests failed for project %s; result:\n%s' % (
self.name, '\n'.join(result))
_LOG.error(message)
return False, result
else:
_LOG.info('Tests passed for project %s', self.name)
return True, self._get_b64encoded_image()
def uninstall(self, strict=False):
"""Uninstall packages under worker.py only."""
handler = _get_strict_handler(strict)
_, result = _run(
[self._get_gradlew(), 'uninstallDebugTest'], cwd=self.path,
env=_Sdk.get_shell_env(), strict=False)
if self._gradlew_failed(result):
handler(
('Unable to uninstall debug test package from Project '
'%s') % self.name)
else:
_LOG.info(
'Uninstalled debug test package from Project %s', self.name)
_, result = _run(
[self._get_gradlew(), 'uninstallDebug'], cwd=self.path,
env=_Sdk.get_shell_env(), strict=False)
if self._gradlew_failed(result):
handler(
'Unable to uninstall debug package for Project %s' % self.name)
else:
_LOG.info('Uninstalled debug package from Project %s', self.name)
def _get_b64encoded_image(self):
local_path = os.path.join(self.path, _RESULT_IMAGE_NAME)
_run([
_Sdk.get_adb(), 'pull',
os.path.join('/sdcard/Robotium-screenshots/', _RESULT_IMAGE_NAME),
os.path.join(self.path, _RESULT_IMAGE_NAME)])
with open(local_path) as f:
return base64.b64encode(f.read())
def _get_gradlew(self):
return os.path.join(self.path, 'gradlew')
def _gradlew_failed(self, result):
return _GRADLEW_INSTALL_SUCCESS_NEEDLE not in result
def _tests_failed(self, result):
return _TEST_FAILURE_NEEDLE in result
class _Runtime(object):
_DEVICE_TMP = '/data/local/tmp'
def __init__(self, project_name, path, avd, port, sdcard, sdcard_size):
self.avd = avd
self.path = path
self.port = port
self.project_name = project_name
self.sdcard = sdcard
self.sdcard_size = sdcard_size
@classmethod
def from_config(cls, key, value):
return cls(
key, os.path.join(_RUNTIMES_PATH, key),
os.path.join(_RUNTIMES_PATH, key, value['avd']),
str(value['port']), os.path.join(_RUNTIMES_PATH, key,
value['sdcard']), value['sdcardSize'])
def block_until_ready(self, interval_msec=1000, timeout_sec=60*10):
start = datetime.datetime.utcnow()
while not self.ready():
now = datetime.datetime.utcnow()
delta_sec = (now - start).total_seconds()
if delta_sec > timeout_sec:
_die(
'Runtime %s timed out at %ss; aborting' % (
self.project_name, delta_sec))
_LOG.debug(
'Waiting %sms for runtime %s', interval_msec, self.project_name)
time.sleep(interval_msec / 1000.0)
def clean(self):
self._avd_delete()
self._sdcard_delete()
self._dir_delete()
def create(self):
self._dir_create()
self._sdcard_create()
self._avd_create()
def exists(self):
return (
self._dir_exists() and self._sdcard_exists() and self._avd_exists())
def ready(self):
return self._emulator_ready()
def start(self, headless=True):
self._emulator_start(headless=headless)
def stop(self):
self._emulator_stop()
def _avd_create(self, strict=False):
name = self._avd_name_get()
path = self._avd_path_get()
handler = _get_strict_handler(strict)
if self._avd_exists():
handler('Unable to create AVD at %s; already exists' % path)
return
code, result = _run([
_Sdk.get_android(), 'create', 'avd', '-n', name, '-t', 'android-19',
'--abi', 'default/armeabi-v7a', '-p', path],
proc_fn=self._avd_create_proc_fn)
if code:
_die('Unable to create avd %s; error was: %s' % (path, result))
_LOG.info('Created AVD named %s at %s', name, path)
def _avd_create_proc_fn(self, process):
process.stdin.write('\n')
process.stdin.flush()
def _avd_delete(self, strict=False):
handler = _get_strict_handler(strict)
name = self._avd_name_get()
path = self._avd_path_get()
if not self._avd_exists():
handler(
'Unable to delete AVD named %s from %s; does not exist' % (
name, path))
return
code, _ = _run(
[_Sdk.get_android(), 'delete', 'avd', '-n', name], strict=False)
if code:
_LOG.warning(
'Unable to remove AVD via Android SDK; falling back to manual '
'cleanup. This may not be entirely accurate.')
self._avd_delete_manually()
else:
_LOG.info('Deleted AVD named %s from %s', name, path)
def _avd_delete_manually(self):
name = self._avd_name_get()
path = self._avd_path_get()
message = 'Unable to remove AVD named %s from %s; does not exist' % (
name, path)
if os.path.exists(path):
shutil.rmtree(path)
message = 'Removed AVD named %s at %s' % (name, path)
_LOG.info(message)
# Path created by Android in addition to the path we specify when making
# the AVD.
        internal_path = os.path.join(
            os.path.expanduser('~'), '.android', 'avd',
            self._avd_name_get().lower())
message = (
'Unable to remove internal AVD named %s from %s; does not '
'exist') % (name, internal_path)
if os.path.exists(internal_path):
shutil.rmtree(internal_path)
message = 'Removed internal AVD named %s at %s' % (
name, internal_path)
_LOG.info(message)
def _avd_exists(self):
return os.path.exists(self._avd_path_get())
def _avd_name_get(self):
return ('%s_avd' % self.project_name).lower()
def _avd_path_get(self):
return os.path.join(self._dir_get(), self._avd_name_get())
def _device_tmp_path_get(self, filename):
return os.path.join(self._DEVICE_TMP, filename)
def _dir_create(self, strict=False):
handler = _get_strict_handler(strict)
path = self._dir_get()
if self._dir_exists():
handler(
'Unable to create runtime directory %s; already exists' % path)
return
os.makedirs(path)
_LOG.info('Created runtime directory %s', path)
def _dir_delete(self, strict=False):
handler = _get_strict_handler(strict)
path = self._dir_get()
if not self._dir_exists():
handler(
'Unable to delete runtime directory %s; does not exist' % path)
return
shutil.rmtree(path)
_LOG.info('Removed runtime directory %s', path)
def _dir_get(self):
return os.path.join(_RUNTIMES_PATH, self.project_name)
def _dir_exists(self):
return os.path.exists(self._dir_get())
def _emulator_name_get(self):
return '%s-%s' % (_EMULATOR, self.port)
def _emulator_ready(self):
if not self._emulator_running():
return False
code, result = _run(
[_Sdk.get_adb(), 'shell', 'getprop', _BOOT_ANIMATION_PROPERTY],
strict=False)
if not code and result[0] == _BOOT_ANIMATION_STOPPED:
return True
return False
def _emulator_running(self):
_, result = _run([_Sdk.get_adb(), 'devices'])
for line in result:
if line.startswith(self._emulator_name_get()):
return True
return False
def _emulator_start(self, headless):
"""Start an emulator in a child process."""
def emulator(project_name, headless=True):
headless_args = ['-no-audio', '-no-window'] if headless else []
code, result = _run([
_Sdk.get_emulator(), '-avd', os.path.basename(self.avd),
'-sdcard', self.sdcard, '-port', self.port,
'-force-32bit'] + headless_args, env=_Sdk.get_shell_env())
if code:
_die(
'Error starting emulator for runtime %s; reason: %s' % (
project_name, '\n'.join(result)))
child = multiprocessing.Process(
target=emulator, args=(self.project_name, headless))
child.daemon = True
child.start()
_LOG.info(
'Emulator for runtime %s started on port %s',
self.project_name, self.port)
def _emulator_stop(self, strict=False):
handler = _get_strict_handler(strict)
if not self._emulator_running():
handler(
'Cannot stop emulator for runtime %s; not running' % (
self.project_name))
return
_run([
_Sdk.get_adb(), '-s', self._emulator_name_get(), 'emu', 'kill'])
_LOG.info('Emulator for runtime %s stopped', self.project_name)
def _sdcard_create(self, strict=False):
handler = _get_strict_handler(strict)
if self._sdcard_exists():
handler('Unable to create sdcard %s; already exists' % self.sdcard)
return
size = '%sM' % self.sdcard_size
code, result = _run([_Sdk.get_mksdcard(), size, self.sdcard])
if code:
_die(
'Unable to create sdcard %s; error was: %s' % (
self.sdcard, result))
_LOG.info('Created %s sdcard: %s', size, self.sdcard)
def _sdcard_delete(self, strict=False):
handler = _get_strict_handler(strict)
if not os.path.exists(self.sdcard):
handler('Unable to remove sdcard %s; does not exist' % self.sdcard)
return
os.remove(self.sdcard)
_LOG.info('Removed sdcard %s', self.sdcard)
def _sdcard_exists(self):
return os.path.exists(self.sdcard)
class _Sdk(object):
PATH = os.path.join(_RESOURCES_PATH, 'sdk')
_VERSION = 'adt-bundle-linux-x86_64-20140702'
_URL = 'https://dl.google.com/android/adt/%s.zip' % _VERSION
@classmethod
def delete(cls, strict=False):
handler = _get_strict_handler(strict)
        if not cls.is_installed():
handler('Android SDK not installed')
return
shutil.rmtree(cls.PATH)
_LOG.info('Android SDK deleted from %s', cls.PATH)
@classmethod
def get_adb(cls):
return cls._get_tool('adb', directory='platform-tools')
@classmethod
def get_android(cls):
return cls._get_tool('android')
@classmethod
def get_emulator(cls):
return cls._get_tool('emulator')
@classmethod
def get_shell_env(cls):
display = os.environ.get('DISPLAY')
if not display:
_die('Could not get shell variable DISPLAY')
return {
_ANDROID_HOME: cls.PATH,
_ANDROID_SDK_HOME: os.path.expanduser('~'),
_DISPLAY: display,
}
@classmethod
def get_mksdcard(cls):
return cls._get_tool('mksdcard')
@classmethod
def install(cls):
cls._download()
cls._install_from_download()
cls._update()
@classmethod
def is_installed(cls):
return os.path.exists(cls.PATH)
@classmethod
def _accept_licenses(cls, process):
"""Scan through android sdk update output and accept licenses."""
seen = set()
for line in iter(process.stdout.readline, ''):
if _ACCEPT_LICENSE_NEEDLE in line and not process.poll():
license_name = re.findall(r"'(.+)'", line)[0]
if license_name not in seen:
seen.add(license_name)
process.stdin.write('y\n')
process.stdin.flush()
_LOG.info('Accepted license %s', license_name)
# TODO(johncox): figure out why this is needed to keep the process
# from hanging.
process.stdin.write('\n')
process.stdin.flush()
@classmethod
def _download(cls):
path = cls._get_download_path()
_LOG.info('Downloading Android SDK from %s to %s', cls._URL, path)
_run(['curl', '-o', path, cls._URL])
@classmethod
def _get_download_path(cls):
return os.path.join(_RESOURCES_TMP_PATH, 'android-sdk.zip')
@classmethod
def _get_tool(cls, name, directory='tools'):
path = os.path.join(cls.PATH, directory, name)
if not os.path.exists(path):
_die('SDK tool %s not found at %s' % (name, path))
return path
@classmethod
def _install_from_download(cls):
_run([
'unzip', cls._get_download_path(), '-d',
os.path.join(_RESOURCES_TMP_PATH,)])
shutil.move(
os.path.join(_RESOURCES_TMP_PATH, cls._VERSION, 'sdk'), cls.PATH)
_LOG.info('Android SDK installed in %s', cls.PATH)
@classmethod
def _update(cls):
_LOG.info('Updating SDK. This takes a long time; please be patient')
_run(
[cls.get_android(), 'update', 'sdk', '-a', '--no-ui'],
proc_fn=cls._accept_licenses)
class _TestEnvironment(object):
"""An environment for test execution.
Manages creation of filesystem for result storage, scratch space for copying
and patching over the golden project, and log redirection.
Lifecycle is:
* Initialize environment.
* Call set_up() to configure external dependencies (filesystem, loggers,
etc.).
* If operating on a project is desired, call set_up_projects().
* Do work.
* Always call tear_down().
"""
_OUT = 'out'
def __init__(self, ticket):
self._handler = None
self.path = self._get_path(ticket)
self._projects_set_up = False
self.out_path = os.path.join(self.path, self._OUT)
self.src_project = None
self.test_project = None
self.ticket = ticket
@classmethod
def get_test_run(cls, ticket):
root_path = cls._get_path(ticket)
json_path = cls._get_result_json_path(ticket)
test_run = TestRun()
if not (os.path.exists(root_path) and os.path.exists(json_path)):
test_run.set_status(TestRun.NOT_FOUND)
test_run.set_payload('No test results found')
return test_run
try:
with open(json_path) as f:
result = json.loads(f.read())
test_run.set_payload(result['payload'])
test_run.set_status(result['status'])
except: # Treat all errors the same. pylint: disable=bare-except
test_run.set_status(TestRun.CONTENTS_MALFORMED)
test_run.set_payload('Test result malformed')
return test_run
@classmethod
def _get_path(cls, ticket):
return os.path.join(_RESULTS_PATH, str(ticket))
@classmethod
def _get_result_json_path(cls, ticket):
return os.path.join(cls._get_path(ticket), cls._OUT, _RESULT_JSON_NAME)
def save(self, test_run):
json_path = os.path.join(self.out_path, _RESULT_JSON_NAME)
with open(json_path, 'w') as f:
f.write(json.dumps(test_run.to_dict()))
_LOG.info('Result saved to ' + json_path)
def set_up(self):
"""Sets up everything but projects."""
self._configure_filesystem()
self._configure_logging()
self._clean_old()
def set_up_projects(self, patches, src_project):
"""Sets up projects and applies patches."""
self._configure_projects(src_project)
self._copy_project()
for patch in patches:
self.test_project.patch(self._get_test_patch(patch))
self._projects_set_up = True
def tear_down(self):
"""Tears down both set_up() and set_up_projects()."""
self._copy_result()
self._remove_test_project()
self._revert_logging()
def _clean_old(self):
"""Cleans old test invocations from the filesystem.
The _TestEnvironment code path is hit on each web worker test
invocation, so we do cleanup here to keep the test results on the
filesystem from filling disk.
"""
now_sec = time.time()
for path in os.listdir(_RESULTS_PATH):
result_dir = os.path.join(_RESULTS_PATH, path)
delta_sec = now_sec - os.path.getmtime(result_dir)
if delta_sec >= _RESULTS_TTL_SEC:
shutil.rmtree(result_dir)
_LOG.info(
('Result directory %s too old (delta: %ssec; TTL: %ssec); '
'removed'), result_dir, delta_sec, _RESULTS_TTL_SEC)
def _configure_logging(self):
"""Also send log info to test project dir."""
self._handler = logging.FileHandler(os.path.join(self.path, 'log'))
self._handler.setLevel(_LOG.level)
_LOG.addHandler(self._handler)
def _configure_projects(self, src_project):
relative_editor_file = src_project.editor_file.split(
self._get_project_name_infix(src_project.name))[1]
test_project_path = os.path.join(self.path, src_project.name)
self.src_project = src_project
self.test_project = _Project(
src_project.name,
os.path.join(test_project_path, relative_editor_file),
src_project.package, test_project_path, src_project.test_class,
src_project.test_package)
def _copy_project(self):
shutil.copytree(self.src_project.path, self.test_project.path)
git_path = os.path.join(self.test_project.path, '.git')
gradle_path = os.path.join(self.test_project.path, '.gradle')
# Clean up files not needed for test builds/runs.
for prune_dir in [git_path, gradle_path]:
if os.path.exists(prune_dir):
shutil.rmtree(prune_dir)
_LOG.info(
'Project %s staged into %s',
self.test_project.name, self.test_project.path)
def _copy_result(self):
if not self._projects_set_up:
return
copy_from = os.path.join(self.test_project.path, _RESULT_IMAGE_NAME)
# When build/test fails there may not be a result image.
if not os.path.exists(copy_from):
_LOG.info('No result image found at ' + copy_from)
else:
copy_to = os.path.join(self.out_path, _RESULT_IMAGE_NAME)
shutil.copyfile(copy_from, copy_to)
_LOG.info('Result image saved to ' + copy_to)
def _configure_filesystem(self):
os.makedirs(self.path)
os.makedirs(self.out_path)
def _get_project_name_infix(self, project_name):
return '/%s/' % project_name
def _get_test_patch(self, patch):
# Rehome patch under test project.
_, suffix = patch.filename.split(
self._get_project_name_infix(self.src_project.name))
return Patch(
os.path.join(self.test_project.path, suffix), patch.contents)
def _remove_test_project(self):
if not self._projects_set_up:
return
# Entire test filesystem tree may have been removed already due to age.
if os.path.exists(self.test_project.path):
shutil.rmtree(self.test_project.path)
_LOG.info(
'Project %s unstaged from %s ',
self.test_project.name, self.test_project.path)
def _revert_logging(self):
if self._handler is not None:
_LOG.removeHandler(self._handler)
self._handler = None
if __name__ == '__main__':
main(_PARSER.parse_args())
|
__init__.py
|
import os
import subprocess
import threading
from multiprocessing import Process
from platypush.backend import Backend
from platypush.backend.http.app import application
from platypush.context import get_or_create_event_loop
from platypush.utils import get_ssl_server_context, set_thread_name
class HttpBackend(Backend):
"""
The HTTP backend is a general-purpose web server that you can leverage:
* To execute Platypush commands via HTTP calls. Example::
curl -XPOST -H 'Content-Type: application/json' -H "X-Token: your_token" \\
-d '{
"type":"request",
"target":"nodename",
"action":"tts.say",
"args": {"phrase":"This is a test"}
}' \\
http://localhost:8008/execute
* To interact with your system (and control plugins and backends) through the Platypush web panel,
by default available on your web root document. Any plugin that you have configured and available as a panel
plugin will appear on the web panel as well as a tab.
* To display a fullscreen dashboard with your configured widgets, by default available under ``/dashboard``
* To stream media over HTTP through the ``/media`` endpoint
Any plugin can register custom routes under ``platypush/backend/http/app/routes/plugins``.
Any additional route is managed as a Flask blueprint template and the `.py`
module can expose lists of routes to the main webapp through the
``__routes__`` object (a list of Flask blueprints).
Note that if you set up a main token, it will be required for any HTTP
interaction - either as ``X-Token`` HTTP header, on the query string
(attribute name: ``token``), as part of the JSON payload root (attribute
name: ``token``), or via HTTP basic auth (any username works).
Requires:
* **flask** (``pip install flask``)
* **redis** (``pip install redis``)
* **websockets** (``pip install websockets``)
* **python-dateutil** (``pip install python-dateutil``)
* **magic** (``pip install python-magic``), optional, for MIME type
support if you want to enable media streaming
* **uwsgi** (``pip install uwsgi`` plus uwsgi server installed on your
system if required) - optional but recommended. By default the
Platypush web server will run in a process spawned on the fly by
the HTTP backend. However, being a Flask app, it will serve clients
in a single thread and won't support many features of a full-blown
web server.
Base command to run the web server over uwsgi::
uwsgi --http :8008 --module platypush.backend.http.uwsgi --master --processes 4 --threads 4
Bear in mind that the main webapp is defined in ``platypush.backend.http.app:application``
and the WSGI startup script is stored under ``platypush/backend/http/uwsgi.py``.
"""
_DEFAULT_HTTP_PORT = 8008
_DEFAULT_WEBSOCKET_PORT = 8009
def __init__(self, port=_DEFAULT_HTTP_PORT,
websocket_port=_DEFAULT_WEBSOCKET_PORT,
disable_websocket=False, dashboard=None, resource_dirs=None,
ssl_cert=None, ssl_key=None, ssl_cafile=None, ssl_capath=None,
maps=None, run_externally=False, uwsgi_args=None, **kwargs):
"""
:param port: Listen port for the web server (default: 8008)
:type port: int
:param websocket_port: Listen port for the websocket server (default: 8009)
:type websocket_port: int
:param disable_websocket: Disable the websocket interface (default: False)
:type disable_websocket: bool
:param ssl_cert: Set it to the path of your certificate file if you want to enable HTTPS (default: None)
:type ssl_cert: str
:param ssl_key: Set it to the path of your key file if you want to enable HTTPS (default: None)
:type ssl_key: str
:param ssl_cafile: Set it to the path of your certificate authority file if you want to enable HTTPS
(default: None)
:type ssl_cafile: str
:param ssl_capath: Set it to the path of your certificate authority directory if you want to enable HTTPS
(default: None)
:type ssl_capath: str
:param resource_dirs: Static resources directories that will be
accessible through ``/resources/<path>``. It is expressed as a map
where the key is the relative path under ``/resources`` to expose and
the value is the absolute path to expose.
:type resource_dirs: dict[str, str]
:param dashboard: Set it if you want to use the dashboard service. It will contain the configuration for the
widgets to be used (look under ``platypush/backend/http/templates/widgets/`` for the available widgets).
Example configuration::
dashboard:
background_image: https://site/image.png
widgets: # Each row of the dashboard will have 6 columns
-
widget: calendar # Calendar widget
columns: 6
-
widget: music # Music widget
columns: 3
-
widget: date-time-weather # Date, time and weather widget
columns: 3
-
widget: image-carousel # Image carousel
columns: 6
# Absolute path (valid as long as it's a subdirectory of one of the available `resource_dirs`)
images_path: ~/Dropbox/Photos/carousel
refresh_seconds: 15
-
widget: rss-news # RSS feeds widget
# Requires backend.http.poll to be enabled with some RSS sources and write them to sqlite db
columns: 6
limit: 25
db: "sqlite:////home/user/.local/share/platypush/feeds/rss.db"
:type dashboard: dict
:param run_externally: If set, then the HTTP backend will not directly
spawn the web server. Set this option if you plan to run the webapp
in a separate web server (recommended), like uwsgi or uwsgi+nginx.
:type run_externally: bool
:param uwsgi_args: If ``run_externally`` is set and you would like the
HTTP backend to directly spawn and control the uWSGI application
server instance, then pass the list of uWSGI arguments through
this parameter. Some examples include::
# Start uWSGI instance listening on HTTP port 8008 with 4
# processes
['--plugin', 'python', '--http-socket', ':8008', '--master', '--processes', '4']
# Start uWSGI instance listening on uWSGI socket on port 3031.
# You can then use another full-blown web server, like nginx
# or Apache, to communicate with the uWSGI instance
['--plugin', 'python', '--socket', '127.0.0.1:3031', '--master', '--processes', '4']
:type uwsgi_args: list[str]
"""
super().__init__(**kwargs)
self.port = port
self.websocket_port = websocket_port
self.dashboard = dashboard or {}
self.maps = maps or {}
self.server_proc = None
self.disable_websocket = disable_websocket
self.websocket_thread = None
if resource_dirs:
self.resource_dirs = {name: os.path.abspath(
os.path.expanduser(d)) for name, d in resource_dirs.items()}
else:
self.resource_dirs = {}
self.active_websockets = set()
self.run_externally = run_externally
self.uwsgi_args = uwsgi_args or []
self.ssl_context = get_ssl_server_context(ssl_cert=ssl_cert,
ssl_key=ssl_key,
ssl_cafile=ssl_cafile,
ssl_capath=ssl_capath) \
if ssl_cert else None
if self.uwsgi_args:
self.uwsgi_args = [str(_) for _ in self.uwsgi_args] + \
['--module', 'platypush.backend.http.uwsgi', '--enable-threads']
self.local_base_url = '{proto}://localhost:{port}'.\
format(proto=('https' if ssl_cert else 'http'), port=self.port)
self._websocket_lock_timeout = 10
self._websocket_lock = threading.RLock()
self._websocket_locks = {}
def send_message(self, msg, **kwargs):
self.logger.warning('Use cURL or any HTTP client to query the HTTP backend')
def on_stop(self):
""" On backend stop """
super().on_stop()
self.logger.info('Received STOP event on HttpBackend')
if self.server_proc:
if isinstance(self.server_proc, subprocess.Popen):
self.server_proc.kill()
self.server_proc.wait()
else:
self.server_proc.terminate()
self.server_proc.join()
def _acquire_websocket_lock(self, ws):
try:
acquire_ok = self._websocket_lock.acquire(timeout=self._websocket_lock_timeout)
if not acquire_ok:
raise TimeoutError('Websocket lock acquire timeout')
addr = ws.remote_address
if addr not in self._websocket_locks:
self._websocket_locks[addr] = threading.RLock()
finally:
self._websocket_lock.release()
acquire_ok = self._websocket_locks[addr].acquire(timeout=self._websocket_lock_timeout)
if not acquire_ok:
raise TimeoutError('Websocket on address {} not ready to receive data'.format(addr))
def _release_websocket_lock(self, ws):
try:
acquire_ok = self._websocket_lock.acquire(timeout=self._websocket_lock_timeout)
if not acquire_ok:
raise TimeoutError('Websocket lock acquire timeout')
addr = ws.remote_address
if addr in self._websocket_locks:
self._websocket_locks[addr].release()
except Exception as e:
self.logger.warning('Unhandled exception while releasing websocket lock: {}'.format(str(e)))
finally:
self._websocket_lock.release()
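    # Note on the two-level locking above: the class-wide _websocket_lock only
    # guards creation/lookup of the per-client lock in _websocket_locks, while
    # the per-address RLock serializes sends to each individual websocket.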
def notify_web_clients(self, event):
""" Notify all the connected web clients (over websocket) of a new event """
import websockets
async def send_event(ws):
try:
self._acquire_websocket_lock(ws)
await ws.send(str(event))
except Exception as e:
self.logger.warning('Error on websocket send_event: {}'.format(e))
finally:
self._release_websocket_lock(ws)
loop = get_or_create_event_loop()
wss = self.active_websockets.copy()
for _ws in wss:
try:
loop.run_until_complete(send_event(_ws))
except websockets.exceptions.ConnectionClosed:
self.logger.warning('Websocket client {} connection lost'.format(_ws.remote_address))
self.active_websockets.remove(_ws)
if _ws.remote_address in self._websocket_locks:
del self._websocket_locks[_ws.remote_address]
def websocket(self):
""" Websocket main server """
import websockets
set_thread_name('WebsocketServer')
async def register_websocket(websocket, path):
address = websocket.remote_address if websocket.remote_address \
else '<unknown client>'
self.logger.info('New websocket connection from {} on path {}'.format(address, path))
self.active_websockets.add(websocket)
try:
await websocket.recv()
except websockets.exceptions.ConnectionClosed:
self.logger.info('Websocket client {} closed connection'.format(address))
self.active_websockets.remove(websocket)
if address in self._websocket_locks:
del self._websocket_locks[address]
websocket_args = {}
if self.ssl_context:
websocket_args['ssl'] = self.ssl_context
loop = get_or_create_event_loop()
loop.run_until_complete(
websockets.serve(register_websocket, '0.0.0.0', self.websocket_port,
**websocket_args))
loop.run_forever()
def _start_web_server(self):
def proc():
self.logger.info('Starting local web server on port {}'.format(self.port))
kwargs = {
'host': '0.0.0.0',
'port': self.port,
'use_reloader': False,
'debug': False,
}
if self.ssl_context:
kwargs['ssl_context'] = self.ssl_context
application.run(**kwargs)
return proc
def run(self):
super().run()
self.register_service(port=self.port)
if not self.disable_websocket:
self.logger.info('Initializing websocket interface')
self.websocket_thread = threading.Thread(target=self.websocket)
self.websocket_thread.start()
if not self.run_externally:
self.server_proc = Process(target=self._start_web_server(),
name='WebServer')
self.server_proc.start()
self.server_proc.join()
elif self.uwsgi_args:
uwsgi_cmd = ['uwsgi'] + self.uwsgi_args
self.logger.info('Starting uWSGI with arguments {}'.format(uwsgi_cmd))
self.server_proc = subprocess.Popen(uwsgi_cmd)
else:
            self.logger.info('The web server is configured to be launched externally but ' +
                             'no uwsgi_args were provided. Make sure that you run another external service ' +
                             'for the webserver (e.g. nginx)')
# vim:sw=4:ts=4:et:
|
__init__.py
|
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import shutil
import logging
import os
import sys
import threading
import time
import traceback
import unittest
import json
import pika
import yaml
from os.path import dirname
from os import path
from cloudify.utils import setup_logger
from cloudify.logs import create_event_message_prefix
import mock_plugins
from testenv.constants import MANAGER_REST_PORT
from testenv.constants import RABBITMQ_VERBOSE_MESSAGES_ENABLED
from testenv.constants import RABBITMQ_POLLING_ENABLED
from testenv.constants import FILE_SERVER_RESOURCES_URI
from testenv.constants import FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER
from testenv.constants import FILE_SERVER_BLUEPRINTS_FOLDER
from testenv.processes.elastic import ElasticSearchProcess
from testenv.processes.manager_rest import ManagerRestProcess
from testenv.processes.riemann import RiemannProcess
from testenv.processes.celery import CeleryWorkerProcess
from testenv import utils
logger = setup_logger('TESTENV')
setup_logger('cloudify.rest_client', logging.INFO)
testenv_instance = None
class TestCase(unittest.TestCase):
"""
A test case for cloudify integration tests.
"""
def setUp(self):
self.logger = setup_logger(self._testMethodName,
logging.INFO)
self.client = utils.create_rest_client()
utils.restore_provider_context()
TestEnvironment.start_celery_management_worker()
self.test_logs_file = path.join(testenv_instance.events_and_logs_dir,
'{0}.log'.format(self.id()))
testenv_instance.handle_logs = \
self._write_test_events_and_logs_to_file
def tearDown(self):
TestEnvironment.stop_celery_management_worker()
TestEnvironment.stop_all_celery_processes()
TestEnvironment.reset_elasticsearch_data()
def _write_test_events_and_logs_to_file(self, output, event):
with open(self.test_logs_file, 'a') as f:
f.write('{0}\n'.format(output))
def get_plugin_data(self,
plugin_name,
deployment_id):
"""
Retrieve the plugin state for a certain deployment.
:param deployment_id: the deployment id in question.
:param plugin_name: the plugin in question.
:return: plugin data relevant for the deployment.
:rtype dict
"""
return self._get_plugin_data(
plugin_name=plugin_name,
deployment_id=deployment_id
)
def clear_plugin_data(self, plugin_name):
"""
Clears plugin state.
:param plugin_name: the plugin in question.
"""
return self._clear_plugin_data(
plugin_name=plugin_name
)
def _get_plugin_data(self,
plugin_name,
deployment_id):
storage_file_path = os.path.join(
testenv_instance.plugins_storage_dir,
'{0}.json'.format(plugin_name)
)
if not os.path.exists(storage_file_path):
return {}
with open(storage_file_path, 'r') as f:
data = json.load(f)
if deployment_id not in data:
data[deployment_id] = {}
return data.get(deployment_id)
def _clear_plugin_data(self,
plugin_name):
storage_file_path = os.path.join(
testenv_instance.plugins_storage_dir,
'{0}.json'.format(plugin_name)
)
if os.path.exists(storage_file_path):
os.remove(storage_file_path)
@staticmethod
def do_assertions(assertions_func, timeout=10, **kwargs):
return utils.do_retries(assertions_func,
timeout,
AssertionError,
**kwargs)
@property
def riemann_workdir(self):
return TestEnvironment.riemann_workdir()
def publish_riemann_event(self,
deployment_id,
node_name,
node_id='',
host='localhost',
service='service',
state='',
metric=0,
ttl=60):
event = {
'host': host,
'service': service,
'state': state,
'metric': metric,
'time': int(time.time()),
'node_name': node_name,
'node_id': node_id,
'ttl': ttl
}
queue = '{0}-riemann'.format(deployment_id)
routing_key = deployment_id
utils.publish_event(queue,
routing_key,
event)
class ProcessModeTestCase(TestCase):
def setUp(self):
# can actually be any string
# besides the empty one
os.environ['PROCESS_MODE'] = 'True'
super(ProcessModeTestCase, self).setUp()
def tearDown(self):
# empty string means false
os.environ['PROCESS_MODE'] = ''
super(ProcessModeTestCase, self).tearDown()
class TestEnvironment(object):
manager_rest_process = None
elasticsearch_process = None
riemann_process = None
file_server_process = None
celery_management_worker_process = None
def __init__(self, test_working_dir):
super(TestEnvironment, self).__init__()
self.test_working_dir = test_working_dir
self.plugins_storage_dir = os.path.join(
self.test_working_dir,
'plugins-storage'
)
os.makedirs(self.plugins_storage_dir)
self.fileserver_dir = path.join(self.test_working_dir, 'fileserver')
self.rest_service_log_level = 'DEBUG'
self.rest_service_log_path = path.join(
self.test_working_dir, 'cloudify-rest-service.log')
self.rest_service_log_file_size_MB = 100
self.rest_service_log_files_backup_count = 20
self.securest_log_level = 'DEBUG'
self.securest_log_file = path.join(
self.test_working_dir, 'rest-security-audit.log')
self.securest_log_file_size_MB = 100
self.securest_log_files_backup_count = 20
self.events_and_logs_dir = \
path.join(self.test_working_dir, 'tests-events-and-logs')
os.mkdir(self.events_and_logs_dir)
def create(self):
try:
logger.info('Setting up test environment... workdir=[{0}]'
.format(self.test_working_dir))
# events/logs polling
start_events_and_logs_polling(
logs_handler_retriever=self._logs_handler_retriever)
self.start_elasticsearch()
self.start_riemann()
self.start_fileserver()
self.start_manager_rest()
self.create_management_worker()
except BaseException as error:
s_traceback = StringIO.StringIO()
traceback.print_exc(file=s_traceback)
logger.error("Error in test environment setup: %s", error)
logger.error(s_traceback.getvalue())
self.destroy()
raise
def create_management_worker(self):
mock_plugins_path = os.path.dirname(mock_plugins.__file__)
os.environ['MOCK_PLUGINS_PATH'] = mock_plugins_path
self.celery_management_worker_process = CeleryWorkerProcess(
queues=['cloudify.management'],
test_working_dir=self.test_working_dir,
# these plugins are already installed.
# so we just need to append to the includes.
# note that these are not mocks, but the actual production
# code plugins.
additional_includes=[
'riemann_controller.tasks',
'cloudify_system_workflows.deployment_environment',
'cloudify.plugins.workflows',
'diamond_agent.tasks',
'script_runner.tasks'
],
# we need higher concurrency since
# 'deployment_environment.create' calls
# 'plugin_installer.install' as a sub-task
# and they are both executed inside
# this worker
concurrency=2
)
# copy plugins to worker env
mock_plugins_path = os.path.dirname(mock_plugins.__file__)
shutil.copytree(
src=mock_plugins_path,
dst=self.celery_management_worker_process.envdir,
ignore=shutil.ignore_patterns('*.pyc')
)
def start_riemann(self):
riemann_config_path = self._get_riemann_config()
libs_path = self._get_libs_path()
self.riemann_process = RiemannProcess(riemann_config_path,
libs_path)
self.riemann_process.start()
def start_manager_rest(self):
from manager_rest.file_server import PORT as FS_PORT
file_server_base_uri = 'http://localhost:{0}'.format(FS_PORT)
self.manager_rest_process = ManagerRestProcess(
MANAGER_REST_PORT,
self.fileserver_dir,
file_server_base_uri,
FILE_SERVER_BLUEPRINTS_FOLDER,
FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
FILE_SERVER_RESOURCES_URI,
self.rest_service_log_level,
self.rest_service_log_path,
self.rest_service_log_file_size_MB,
self.rest_service_log_files_backup_count,
self.securest_log_level,
self.securest_log_file,
self.securest_log_file_size_MB,
self.securest_log_files_backup_count,
self.test_working_dir)
self.manager_rest_process.start()
def start_elasticsearch(self):
# elasticsearch
self.elasticsearch_process = ElasticSearchProcess()
self.elasticsearch_process.start()
def start_fileserver(self):
# workaround to update path
manager_rest_path = \
path.dirname(path.dirname(path.dirname(__file__)))
manager_rest_path = path.join(manager_rest_path, 'rest-service')
sys.path.append(manager_rest_path)
os.mkdir(self.fileserver_dir)
from manager_rest.file_server import FileServer
from manager_rest.utils import copy_resources
self.file_server_process = FileServer(self.fileserver_dir)
self.file_server_process.start()
# copy resources (base yaml etc)
resources_path = path.abspath(__file__)
resources_path = path.dirname(resources_path)
resources_path = path.dirname(resources_path)
resources_path = path.dirname(resources_path)
resources_path = path.join(resources_path, 'resources')
copy_resources(self.fileserver_dir, resources_path)
self.patch_source_urls(self.fileserver_dir)
def destroy(self):
logger.info('Destroying test environment...')
if self.riemann_process:
self.riemann_process.close()
if self.elasticsearch_process:
self.elasticsearch_process.close()
if self.manager_rest_process:
self.manager_rest_process.close()
if self.file_server_process:
self.file_server_process.stop()
self.delete_working_directory()
def delete_working_directory(self):
if os.path.exists(self.test_working_dir):
logger.info('Deleting test environment from: %s',
self.test_working_dir)
shutil.rmtree(self.test_working_dir, ignore_errors=True)
def handle_logs(self, output, event):
pass
def _logs_handler_retriever(self):
return self.handle_logs
@classmethod
def _get_riemann_config(cls):
manager_dir = cls._get_manager_root()
plugins_dir = os.path.join(manager_dir, 'plugins')
riemann_dir = os.path.join(plugins_dir, 'riemann-controller')
package_dir = os.path.join(riemann_dir, 'riemann_controller')
resources_dir = os.path.join(package_dir, 'resources')
manager_config = os.path.join(resources_dir, 'manager.config')
return manager_config
@classmethod
def _get_libs_path(cls):
return path.join(cls._get_manager_root(), '.libs')
@staticmethod
def reset_elasticsearch_data():
global testenv_instance
testenv_instance.elasticsearch_process.reset_data()
@staticmethod
def stop_celery_management_worker():
global testenv_instance
testenv_instance.celery_management_worker_process.stop()
@staticmethod
def read_celery_management_logs():
global testenv_instance
process = testenv_instance.celery_management_worker_process
return process.try_read_logfile()
@classmethod
def stop_all_celery_processes(cls):
logger.info('Shutting down all celery processes')
os.system("pkill -9 -f 'celery worker'")
@staticmethod
def start_celery_management_worker():
global testenv_instance
testenv_instance.celery_management_worker_process.start()
@staticmethod
def riemann_cleanup():
global testenv_instance
shutil.rmtree(TestEnvironment.riemann_workdir())
os.mkdir(TestEnvironment.riemann_workdir())
testenv_instance.riemann_process.restart()
@staticmethod
def riemann_workdir():
global testenv_instance
return testenv_instance.\
celery_management_worker_process.\
riemann_config_dir
@staticmethod
def _get_manager_root():
init_file = __file__
testenv_dir = dirname(init_file)
tests_dir = dirname(testenv_dir)
manager_dir = dirname(tests_dir)
return manager_dir
@staticmethod
def patch_source_urls(resources):
with open(path.join(resources,
'cloudify', 'types', 'types.yaml')) as f:
types_yaml = yaml.safe_load(f.read())
for policy_type in types_yaml.get('policy_types', {}).values():
in_path = '/cloudify/policies/'
source = policy_type['source']
if in_path in source:
source = source[source.index(in_path) + 1:]
policy_type['source'] = source
for policy_trigger in types_yaml.get('policy_triggers', {}).values():
in_path = '/cloudify/triggers/'
source = policy_trigger['source']
if in_path in source:
source = source[source.index(in_path) + 1:]
policy_trigger['source'] = source
with open(path.join(resources,
'cloudify', 'types', 'types.yaml'), 'w') as f:
f.write(yaml.safe_dump(types_yaml))
def start_events_and_logs_polling(logs_handler_retriever=None):
"""
Fetches events and logs from RabbitMQ.
"""
if not RABBITMQ_POLLING_ENABLED:
return
setup_logger('pika', logging.INFO)
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
queues = ['cloudify-events', 'cloudify-logs']
for q in queues:
channel.queue_declare(queue=q, auto_delete=True, durable=True,
exclusive=False)
def callback(ch, method, properties, body):
try:
event = json.loads(body)
if RABBITMQ_VERBOSE_MESSAGES_ENABLED:
output = '\n{0}'.format(json.dumps(event, indent=4))
else:
output = create_event_message_prefix(event)
logger.info(output)
if logs_handler_retriever:
logs_handler_retriever()(output, event)
except Exception as e:
logger.error('event/log format error - output: {0} [message={1}]'
.format(body, e.message))
s_traceback = StringIO.StringIO()
traceback.print_exc(file=s_traceback)
logger.error(s_traceback.getvalue())
def consume():
channel.basic_consume(callback, queue=queues[0], no_ack=True)
channel.basic_consume(callback, queue=queues[1], no_ack=True)
channel.start_consuming()
logger.info("Starting RabbitMQ events/logs polling - queues={0}".format(
queues))
polling_thread = threading.Thread(target=consume)
polling_thread.daemon = True
polling_thread.start()
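# Illustrative sketch (not part of this module): what a test built on the
# TestCase helpers above could look like. The plugin name and deployment id
# are assumptions chosen for the example only, so the class is left commented
# out to keep it away from test discovery.
#
#   class ExampleSanityTest(TestCase):
#
#       def test_no_plugin_state_before_deployment(self):
#           data = self.get_plugin_data(plugin_name='mock_plugin',
#                                       deployment_id='example-deployment')
#           self.assertEqual({}, data)
#
#       def test_logs_file_is_created(self):
#           def assertion():
#               self.assertTrue(path.exists(self.test_logs_file))
#           self.do_assertions(assertion, timeout=10)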
|
ssh.py
|
# -*- coding: utf-8 -*-
import paramiko
import threading
from threading import Thread
from utils.tools.tools import get_key_obj
import traceback
import socket
import json
import os
import logging
import time
logging.basicConfig(level=logging.INFO)
zmodemszstart = b'rz\r**\x18B00000000000000\r\x8a'
zmodemszend = b'**\x18B0800000000022d\r\x8a'
zmodemrzstart = b'rz waiting to receive.**\x18B0100000023be50\r\x8a'
zmodemrzend = b'**\x18B0800000000022d\r\x8a'
zmodemcancel = b'\x18\x18\x18\x18\x18\x08\x08\x08\x08\x08'
PRIVATE_KEY_FILE = os.path.dirname(os.path.abspath(__file__)) + "/id_rsa"
class SSH:
def __init__(self, websocker, message, current_user, host, remote_addr):
self.websocker = websocker
self.message = message
self.cmd = ''
self.res = ''
self.current_user = current_user
self.host = host
self.remote_addr = remote_addr
self.zmodem = False
self.zmodemOO = False
    # term may be ansi, linux, vt100, xterm or dumb; all of them except dumb support colour output
def connect(self, host, user, password=None, ssh_key=None, port=22, timeout=30,
term='ansi', pty_width=80, pty_height=24):
try:
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if ssh_key:
key = get_key_obj(paramiko.RSAKey, pkey_obj=ssh_key, password=password) or \
get_key_obj(paramiko.DSSKey, pkey_obj=ssh_key, password=password) or \
get_key_obj(paramiko.ECDSAKey, pkey_obj=ssh_key, password=password) or \
get_key_obj(paramiko.Ed25519Key, pkey_obj=ssh_key, password=password)
private_key = paramiko.RSAKey.from_private_key_file(PRIVATE_KEY_FILE)
ssh_client.connect(username=user, hostname=host, port=port, pkey=private_key, timeout=timeout)
else:
ssh_client.connect(username=user, password=password, hostname=host, port=port, timeout=timeout)
transport = ssh_client.get_transport()
self.channel = transport.open_session()
self.channel.get_pty(term=term, width=pty_width, height=pty_height)
self.channel.invoke_shell()
for i in range(2):
recv = self.channel.recv(1024).decode('utf-8')
self.message['status'] = 0
self.message['message'] = recv
message = json.dumps(self.message)
self.websocker.send(message)
self.res += recv
            # Start a thread that forwards data returned by the server to the Django websocket (a single thread is enough)
Thread(target=self.websocket_to_django).start()
# Thread(target=self.websocket_to_django).start()
# Thread(target=self.websocket_to_django).start()
        except Exception:
self.message['status'] = 2
            self.message['message'] = 'connection failed...'
message = json.dumps(self.message)
self.websocker.send(message)
self.websocker.close(3001)
def resize_pty(self, cols, rows):
self.channel.resize_pty(width=cols, height=rows)
def django_to_ssh(self, data):
try:
self.channel.send(data)
if data == '\r':
data = ''
if self.cmd != '\n':
current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
logging.info(" time: {}, src_host: {}, dest_host: {}, current_user: {}, command: {}".format(current_time, self.remote_addr, self.host, self.current_user, self.cmd))
self.cmd = ''
self.cmd += data
except Exception:
self.close()
def django_bytes_to_ssh(self, data):
try:
self.channel.send(data)
except Exception:
self.close()
def websocket_to_django(self):
try:
while True:
if self.zmodemOO:
self.zmodemOO = False
data = self.channel.recv(2)
if not len(data):
return
if data == b'OO':
self.websocker.send(bytes_data=data)
continue
else:
data = data + self.channel.recv(4096)
else:
data = self.channel.recv(4096)
if not len(data):
return
if self.zmodem:
if zmodemszend in data or zmodemrzend in data:
self.zmodem = False
if zmodemszend in data:
self.zmodemOO = True
if zmodemcancel in data:
self.zmodem = False
self.websocker.send(bytes_data=data)
else:
if zmodemszstart in data or zmodemrzstart in data:
self.zmodem = True
self.websocker.send(bytes_data=data)
else:
data = data.decode('utf-8')
self.message['status'] = 0
self.message['message'] = data
self.res += data
message = json.dumps(self.message)
self.websocker.send(message)
        except Exception:
self.close()
def close(self):
self.message['status'] = 1
self.message['message'] = 'connection closed...'
message = json.dumps(self.message)
self.websocker.send(message)
self.websocker.close()
self.channel.close()
def shell(self, data):
        # Data could be sent to the SSH channel from a separate thread (one character per call),
        # but calling the function directly performs better, so no thread is used here.
# Thread(target=self.django_to_ssh, args=(data,)).start()
self.django_to_ssh(data)
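if __name__ == '__main__':
    # Standalone usage sketch (an assumption, not the Django integration this
    # class is written for): drive the SSH wrapper above with a stub websocket
    # object that simply prints the frames it would have pushed to the
    # browser. Host and credentials are placeholders and a reachable SSH
    # server is required for this to do anything useful.
    class _PrintingWebSocket:
        def send(self, text_data=None, bytes_data=None):
            print(text_data if text_data is not None else bytes_data)

        def close(self, code=None):
            print('websocket closed ({})'.format(code))

    demo = SSH(websocker=_PrintingWebSocket(), message={}, current_user='demo',
               host='127.0.0.1', remote_addr='127.0.0.1')
    demo.connect(host='127.0.0.1', user='demo', password='change-me')
    demo.shell('ls\r')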
|
test_betfairstream.py
|
import unittest
import socket
import time
import threading
from unittest import mock
from betfairlightweight.compat import json
from betfairlightweight.streaming.betfairstream import (
BetfairStream,
HistoricalStream,
HistoricalGeneratorStream,
)
from betfairlightweight.exceptions import SocketError, ListenerError
class BetfairStreamTest(unittest.TestCase):
def setUp(self):
self.mock_listener = mock.Mock()
self.mock_listener.on_data.return_value = False
self.unique_id = 1
self.app_key = "app_key"
self.session_token = "session_token"
self.timeout = 6
self.buffer_size = 1024
self.betfair_stream = BetfairStream(
self.unique_id,
self.mock_listener,
self.app_key,
self.session_token,
self.timeout,
self.buffer_size,
None,
)
def test_init(self):
assert self.betfair_stream._unique_id == self.unique_id
assert self.betfair_stream.listener == self.mock_listener
assert self.betfair_stream.app_key == self.app_key
assert self.betfair_stream.session_token == self.session_token
assert self.betfair_stream.timeout == self.timeout
assert self.betfair_stream.buffer_size == self.buffer_size
assert self.betfair_stream.host == "stream-api.betfair.com"
assert self.betfair_stream.receive_count == 0
assert self.betfair_stream.datetime_last_received is None
assert self.betfair_stream._socket is None
assert self.betfair_stream._running is False
def test_host_init(self):
betfair_stream = BetfairStream(
self.unique_id,
self.mock_listener,
self.app_key,
self.session_token,
self.timeout,
self.buffer_size,
"integration",
)
assert betfair_stream.host == "stream-api-integration.betfair.com"
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.authenticate")
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._connect")
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._read_loop")
def test_start(self, mock_read_loop, mock_connect, mock_authenticate):
self.betfair_stream._running = True
self.betfair_stream.start()
mock_read_loop.assert_called_with()
self.betfair_stream._running = False
self.betfair_stream.start()
mock_connect.assert_called_with()
mock_authenticate.assert_called_with()
@mock.patch(
"betfairlightweight.streaming.betfairstream.BetfairStream._create_socket"
)
def test_connect(self, mock_create_socket):
self.betfair_stream._connect()
assert self.betfair_stream._running is True
mock_create_socket.assert_called_with()
def test_stop(self):
self.betfair_stream.stop()
assert self.betfair_stream._running is False
assert self.betfair_stream._socket is None
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._send")
def test_authenticate(self, mock_send):
self.betfair_stream.authenticate()
mock_send.assert_called_with(
{
"id": self.betfair_stream._unique_id,
"appKey": self.app_key,
"session": self.session_token,
"op": "authentication",
}
)
self.betfair_stream.authenticate()
mock_send.assert_called_with(
{
"id": self.betfair_stream._unique_id,
"appKey": self.app_key,
"session": self.session_token,
"op": "authentication",
}
)
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._send")
def test_heartbeat(self, mock_send):
self.betfair_stream.heartbeat()
mock_send.assert_called_with(
{"id": self.betfair_stream._unique_id, "op": "heartbeat"}
)
self.betfair_stream.heartbeat()
mock_send.assert_called_with(
{"id": self.betfair_stream._unique_id, "op": "heartbeat"}
)
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._send")
def test_subscribe_to_markets(self, mock_send):
market_filter = {"test": 123}
market_data_filter = {"another_test": 123}
self.betfair_stream.subscribe_to_markets(
market_filter,
market_data_filter,
heartbeat_ms=1,
conflate_ms=2,
segmentation_enabled=False,
)
mock_send.assert_called_with(
{
"op": "marketSubscription",
"marketFilter": market_filter,
"id": self.betfair_stream._unique_id,
"marketDataFilter": market_data_filter,
"initialClk": None,
"clk": None,
"heartbeatMs": 1,
"conflateMs": 2,
"segmentationEnabled": False,
}
)
self.mock_listener.register_stream.assert_called_with(
self.betfair_stream._unique_id, "marketSubscription"
)
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._send")
def test_resubscribe_to_markets(self, mock_send):
market_filter = {"test": 123}
market_data_filter = {"another_test": 123}
initial_clk = "abcdef"
clk = "abc"
self.betfair_stream.subscribe_to_markets(
market_filter,
market_data_filter,
initial_clk,
clk,
heartbeat_ms=1,
conflate_ms=2,
segmentation_enabled=False,
)
mock_send.assert_called_with(
{
"op": "marketSubscription",
"marketFilter": market_filter,
"id": self.betfair_stream._unique_id,
"marketDataFilter": market_data_filter,
"initialClk": initial_clk,
"clk": clk,
"heartbeatMs": 1,
"conflateMs": 2,
"segmentationEnabled": False,
}
)
assert not self.mock_listener.register_stream.called
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._send")
def test_subscribe_to_orders(self, mock_send):
initial_clk = "abcdef"
clk = "abc"
self.betfair_stream.subscribe_to_orders(
initial_clk, clk, heartbeat_ms=1, conflate_ms=2, segmentation_enabled=False
)
mock_send.assert_called_with(
{
"orderFilter": "abcdef",
"id": self.betfair_stream._unique_id,
"op": "orderSubscription",
"initialClk": "abc",
"clk": None,
"heartbeatMs": 1,
"conflateMs": 2,
"segmentationEnabled": False,
}
)
self.mock_listener.register_stream.assert_called_with(
self.betfair_stream._unique_id, "orderSubscription"
)
@mock.patch("ssl.wrap_socket")
@mock.patch("socket.socket")
def test_create_socket(self, mock_socket, mock_wrap_socket):
self.betfair_stream._create_socket()
mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_STREAM)
assert mock_wrap_socket.call_count == 1
@mock.patch(
"betfairlightweight.streaming.betfairstream.BetfairStream._data",
return_value=False,
)
@mock.patch(
"betfairlightweight.streaming.betfairstream.BetfairStream._receive_all",
return_value="{}\r\n",
)
def test_read_loop(self, mock_receive_all, mock_data):
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
self.betfair_stream._running = True
threading.Thread(target=self.betfair_stream._read_loop).start()
for i in range(0, 2):
time.sleep(0.1)
self.betfair_stream._running = False
time.sleep(0.1)
mock_data.assert_called_with("{}")
mock_receive_all.assert_called_with()
assert self.betfair_stream.datetime_last_received is not None
assert self.betfair_stream.receive_count > 0
def test_receive_all(self):
mock_socket = mock.Mock()
data_return_value = b'{"op":"status"}\r\n'
mock_socket.recv.return_value = data_return_value
self.betfair_stream._socket = mock_socket
data = self.betfair_stream._receive_all()
assert data == ""
self.betfair_stream._running = True
data = self.betfair_stream._receive_all()
mock_socket.recv.assert_called_with(self.buffer_size)
assert data == data_return_value.decode("utf-8")
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_receive_all_closed(self, mock_stop):
mock_socket = mock.Mock()
data_return_value = b""
mock_socket.recv.return_value = data_return_value
self.betfair_stream._socket = mock_socket
self.betfair_stream._running = True
with self.assertRaises(SocketError):
self.betfair_stream._receive_all()
mock_stop.assert_called_with()
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_receive_all_error(self, mock_stop):
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
self.betfair_stream._running = True
mock_socket.recv.side_effect = socket.error()
with self.assertRaises(SocketError):
self.betfair_stream._receive_all()
mock_stop.assert_called_with()
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_receive_all_timeout(self, mock_stop):
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
self.betfair_stream._running = True
mock_socket.recv.side_effect = socket.timeout()
with self.assertRaises(SocketError):
self.betfair_stream._receive_all()
mock_stop.assert_called_with()
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_data(self, mock_stop):
received_data = {"op": "status"}
with self.assertRaises(ListenerError):
self.betfair_stream._data(received_data)
self.mock_listener.on_data.assert_called_with(received_data)
assert mock_stop.called
self.mock_listener.on_data.return_value = True
self.betfair_stream._data(received_data)
self.mock_listener.on_data.assert_called_with(received_data)
assert mock_stop.call_count == 1
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.authenticate")
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._connect")
def test_send(self, mock_connect, mock_authenticate):
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
message = {"message": 1}
self.betfair_stream._send(message)
assert mock_connect.call_count == 1
assert mock_authenticate.call_count == 1
assert mock_socket.sendall.call_count == 1
try:
import orjson
rust = True
        except ImportError:
rust = False
if rust:
mock_socket.sendall.assert_called_with(b'{"message":1}\r\n')
else:
mock_socket.sendall.assert_called_with(b'{"message": 1}\r\n')
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_send_timeout(self, mock_stop):
self.betfair_stream._running = True
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
mock_socket.sendall.side_effect = socket.timeout()
message = {"message": 1}
with self.assertRaises(SocketError):
self.betfair_stream._send(message)
mock_stop.assert_called_with()
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_send_error(self, mock_stop):
self.betfair_stream._running = True
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
mock_socket.sendall.side_effect = socket.error()
message = {"message": 1}
with self.assertRaises(SocketError):
self.betfair_stream._send(message)
mock_stop.assert_called_with()
def test_repr(self):
assert repr(self.betfair_stream) == "<BetfairStream>"
def test_str(self):
assert str(self.betfair_stream) == "<BetfairStream [not running]>"
self.betfair_stream._running = True
assert str(self.betfair_stream) == "<BetfairStream [running]>"
class HistoricalStreamTest(unittest.TestCase):
def setUp(self):
self.file_path = "tests/resources/historicaldata/BASIC-1.132153978"
self.listener = mock.Mock()
self.operation = "marketSubscription"
self.stream = HistoricalStream(self.file_path, self.listener, self.operation)
def test_init(self):
assert self.stream.file_path == self.file_path
assert self.stream.listener == self.listener
assert self.stream._running is False
assert self.stream.operation == self.operation
@mock.patch("betfairlightweight.endpoints.streaming.HistoricalStream._read_loop")
def test_start(self, mock_read_loop):
self.stream.start()
mock_read_loop.assert_called_with()
assert self.stream._running is True
def test_stop(self):
self.stream._running = True
self.stream.stop()
assert self.stream._running is False
@mock.patch("betfairlightweight.streaming.betfairstream.HistoricalStream.stop")
def test__read_loop(self, mock_stop):
self.stream._running = True
self.stream._read_loop()
self.assertEqual(self.listener.on_data.call_count, 480)
self.listener.on_data.snap()
mock_stop.assert_called_with()
self.assertTrue(self.stream._running)
self.listener.register_stream.assert_called_with(0, self.operation)
class HistoricalGeneratorStreamTest(unittest.TestCase):
def setUp(self):
self.file_path = "tests/resources/historicaldata/BASIC-1.132153978"
self.listener = mock.Mock()
self.operation = "marketSubscription"
self.stream = HistoricalGeneratorStream(
self.file_path, self.listener, self.operation
)
def test_init(self):
assert self.stream.file_path == self.file_path
assert self.stream.listener == self.listener
assert self.stream._running is False
assert self.stream.operation == self.operation
@mock.patch(
"betfairlightweight.streaming.betfairstream.HistoricalGeneratorStream._read_loop"
)
def test_get_generator(self, mock_read_loop):
self.assertEqual(self.stream.get_generator(), mock_read_loop)
@mock.patch(
"betfairlightweight.streaming.betfairstream.HistoricalGeneratorStream.stop"
)
def test__read_loop(self, mock_stop):
data = [i for i in self.stream._read_loop()]
self.assertEqual(len(data), 480)
self.assertEqual(self.listener.on_data.call_count, 480)
self.listener.on_data.snap()
mock_stop.assert_called_with()
self.assertTrue(self.stream._running)
self.listener.register_stream.assert_called_with(0, self.operation)
|
prometheus_client_fix.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
import threading
from prometheus_client import core
from prometheus_client.exposition import MetricsHandler
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from urllib2 import build_opener, Request, HTTPHandler
from urllib import quote_plus
from urlparse import parse_qs, urlparse
except ImportError:
# Python 3
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.request import build_opener, Request, HTTPHandler
from urllib.parse import quote_plus, parse_qs, urlparse
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
"""Thread per request HTTP server."""
# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
# Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
# same as Python 3.7's ``ThreadingHTTPServer``.
daemon_threads = True
def start_http_server(port, addr='', registry=core.REGISTRY):
"""Starts an HTTP server for prometheus metrics as a daemon thread"""
CustomMetricsHandler = MetricsHandler.factory(registry)
httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
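if __name__ == '__main__':
    # Usage sketch (not part of the original fix): export a single counter
    # through the daemon-threaded server defined above. The metric name and
    # port are placeholders chosen for illustration.
    import time
    from prometheus_client import Counter

    heartbeats = Counter('example_heartbeats_total',
                         'Heartbeats emitted by the demo loop')
    start_http_server(9125)
    while True:
        heartbeats.inc()  # visible at http://localhost:9125/metrics
        time.sleep(1)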
|
telegram.py
|
import os, time, multiprocessing
import telepot
import telegram_cfg
import objtrack
p = multiprocessing.Process()
act = multiprocessing.Value('i', 0)
def sendCapture(chat_id):
bot.sendPhoto(chat_id=chat_id, photo=open('capture.jpg', 'rb'), caption='\U0001F4F7 Instant capture')
act.value = 0
def sendVideo(chat_id):
    bot.sendVideo(chat_id=chat_id, video=open('record.mp4', 'rb'), caption='\U0001F3A5 Instant 5-second movie')
act.value = 0
def sendAlert(chat_id):
bot.sendPhoto(chat_id=chat_id, photo=open('alert.jpg', 'rb'), caption='\U0001F6A8 A movement has been detected')
act.value = 0
def handle(msg):
global p
chat_id = msg['chat']['id']
command = msg['text']
if str(msg['from']['id']) not in telegram_cfg.TELEGRAM_USRID_WHITELIST:
bot.sendMessage(chat_id=chat_id, text='\u2694\U0001F512 Forbidden access')
else:
        print('Received: %s' % command)
if command == '/runtracking':
if p.is_alive():
bot.sendMessage(chat_id=chat_id, text='Tracking already running')
else:
dic_fun = {'capture': sendCapture, 'video': sendVideo, 'alert': sendAlert}
p = multiprocessing.Process(target=objtrack.startTracking, args=(True, chat_id, dic_fun, act))
p.start()
elif command == '/stoptracking':
if p.is_alive():
act.value = 3
p.join()
act.value = 0
else:
bot.sendMessage(chat_id=chat_id, text='Tracking is not running')
elif command == '/status':
bot.sendMessage(chat_id=chat_id, text=('\U0001F7E2' if p.is_alive() else '\U0001F534'))
elif command == '/capture':
if p.is_alive():
act.value = 1
else:
bot.sendMessage(chat_id=chat_id, text='Tracking is not running')
elif command == '/video':
if p.is_alive():
act.value = 2
bot.sendMessage(chat_id=chat_id, text='\u231B The video is recording')
else:
bot.sendMessage(chat_id=chat_id, text='Tracking is not running')
# The bot token is read from telegram_cfg.TELEGRAM_API
bot = telepot.Bot(token=telegram_cfg.TELEGRAM_API)
bot.message_loop(handle)
print('Telegram Bot is listening')
while 1:
time.sleep(25)
|
track_visualisation_rt_near.py
|
from object_tracking_rt_near import track_objects_realtime, imshow_resized
import math
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
import multiprocessing
import csv
class TrackPlot():
def __init__(self, track_id):
self.id = track_id
self.xs = np.array([], dtype=int)
self.ys = np.array([], dtype=int)
self.frameNos = np.array([], dtype=int)
self.times = np.array([])
self.colourized_times = []
self.lastSeen = 0
self.Q = 0
self.turning_angle = np.array([]) # Degrees
self.curvature = np.array([])
self.pace = np.array([])
self.track_feature_variable = np.array([])
def plot_track(self):
print(f"Track {self.id} being plotted...")
plt.scatter(self.xs, self.ys, c=self.colourized_times, marker='+')
plt.show()
def update(self, location, frame_no, time=None):
self.xs = np.append(self.xs, [int(location[0])])
self.ys = np.append(self.ys, [int(location[1])])
self.frameNos = np.append(self.frameNos, [frame_no])
if time is not None:
self.times = np.append(self.times, time)
self.lastSeen = frame_no
def calculate_track_feature_variable(self, frameNo):
# Check if there is enough data to calculate turning angle (at least 3 points)
# And that the data is still current
if len(self.frameNos) >= 3 and self.frameNos[-1] == frameNo:
# Check if the last 3 frames are consecutive
if self.frameNos[-2] == self.frameNos[-1] - 1 and self.frameNos[-3] == self.frameNos[-2] - 1:
# Retrieve the x and y values of the last 3 points and introduce t for readability
t = 2
x, y = self.xs[-3:], self.ys[-3:]
# Turning angle
xs = np.array([x[t] - x[t - 1], x[t - 1] - x[t - 2]])
ys = np.array([y[t] - y[t - 1], y[t - 1] - y[t - 2]])
# arctan2 returns the element-wise arc tangent, choosing the element correctly
# Special angles excluding infinities:
# y = +/- 0, x = +0, theta = +/- 0
# y = +/- 0, x = -0, theta = +/- pi
                # what's a positive or negative 0? (IEEE 754 floats distinguish signed zeros)
heading = np.arctan2(ys, xs) * 180 / np.pi
turning_angle = heading[1] - heading[0]
self.turning_angle = np.append(self.turning_angle, turning_angle)
# Curvature
a = np.sqrt((x[t] - x[t - 2]) ** 2 + (y[t] - y[t - 2]) ** 2)
b = np.sqrt((x[t - 1] - x[t - 2]) ** 2 + (y[t - 1] - y[t - 2]) ** 2)
c = np.sqrt((x[t] - x[t - 1]) ** 2 + (y[t] - y[t - 1]) ** 2)
if b == 0 or c == 0:
curvature = 0
else:
curvature = np.arccos((a ** 2 - b ** 2 - c ** 2) / (2 * b * c))
# For whatever reason, the arccos of 1.0000000000000002 is nan
if np.isnan(curvature):
curvature = 0
self.curvature = np.append(self.curvature, curvature)
# Pace
# Check if the data was returned in real time
if self.times.size != 0: # If so, dt is the difference in the time each consecutive frame was read
dt = self.times[-1] - self.times[-2]
else:
# assume 30 FPS
dt = 1 / 30
pace = c / dt
self.pace = np.append(self.pace, pace)
track_feature_variable = np.mean(self.turning_angle) * np.mean(self.curvature) * np.mean(self.pace)
self.track_feature_variable = np.append(self.track_feature_variable, track_feature_variable)
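# Worked example (illustrative only, never called by the tracker): the turning
# angle and curvature that calculate_track_feature_variable() above derives
# for three hand-picked points. The coordinates are assumptions chosen to keep
# the arithmetic easy to follow.
def _turning_angle_curvature_example():
    x = np.array([0, 1, 2])
    y = np.array([0, 0, 1])
    # Heading of the last two segments, in degrees
    xs = np.array([x[2] - x[1], x[1] - x[0]])  # [1, 1]
    ys = np.array([y[2] - y[1], y[1] - y[0]])  # [1, 0]
    heading = np.arctan2(ys, xs) * 180 / np.pi  # [45.0, 0.0]
    turning_angle = heading[1] - heading[0]  # -45.0 degrees
    # Side lengths of the triangle spanned by the three points
    a = np.sqrt((x[2] - x[0]) ** 2 + (y[2] - y[0]) ** 2)  # sqrt(5)
    b = np.sqrt((x[1] - x[0]) ** 2 + (y[1] - y[0]) ** 2)  # 1
    c = np.sqrt((x[2] - x[1]) ** 2 + (y[2] - y[1]) ** 2)  # sqrt(2)
    curvature = np.arccos((a ** 2 - b ** 2 - c ** 2) / (2 * b * c))  # ~0.785 rad
    return turning_angle, curvature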
def plot_tracks_realtime(filename=0):
plotting_queue = multiprocessing.Queue()
get_results_p = multiprocessing.Process(target=get_results, args=(plotting_queue, filename))
plot_results_p = multiprocessing.Process(target=plot_results, args=(plotting_queue, filename))
get_results_p.start()
plot_results_p.start()
get_results_p.join()
plot_results_p.join()
def get_results(q1, filename):
generator = track_objects_realtime(filename)
for item in generator:
q1.put(item)
def plot_results(q, filename):
origin = [0, 0]
track_ids = []
track_plots = []
plot_history = 200
colours = [''] * plot_history
for i in range(plot_history):
colours[i] = scalar_to_rgb(i, plot_history)
frame_no = 0
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.5
cap = cv2.VideoCapture(filename)
plot_out = cv2.VideoWriter('../data/video_plot.mp4', cv2.VideoWriter_fourcc(*'h264'),
int(cap.get(cv2.CAP_PROP_FPS)),
(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
cap.release()
new_data = False
last_update = time.time()
while True:
while not q.empty():
new_data = True
item = q.get()
tracks, origin, frame_no, frame, frame_start = item
for track in tracks:
track_id = track[0]
if track_id not in track_ids: # First occurrence of the track
track_ids.append(track_id)
track_plots.append(TrackPlot(track_id))
track_plot = track_plots[track_ids.index(track_id)]
if frame_start != False:
track_plot.update(track[3], frame_no, frame_start)
else:
track_plot.update(track[3], frame_no)
track_plot.calculate_track_feature_variable(frame_no)
last_update = time.time()
if new_data:
for track_plot in track_plots:
idxs = np.where(np.logical_and(track_plot.frameNos > frame_no - plot_history,
track_plot.frameNos <= frame_no))[0]
for idx in idxs:
cv2.circle(frame, (track_plot.xs[idx] - origin[0], track_plot.ys[idx] - origin[1]),
3, colours[track_plot.frameNos[idx] - frame_no + plot_history - 1][::-1], -1)
if len(idxs) != 0:
cv2.putText(frame, f"ID: {track_plot.id}",
(track_plot.xs[idx] - origin[0], track_plot.ys[idx] - origin[1] + 15),
font, font_scale, (0, 0, 255), 1, cv2.LINE_AA)
if track_plot.track_feature_variable.size != 0:
cv2.putText(frame, f"Xj: {np.mean(track_plot.track_feature_variable):.3f}",
(track_plot.xs[idx] - origin[0], track_plot.ys[idx] - origin[1] + 30),
font, font_scale, (0, 255, 0), 1, cv2.LINE_AA)
plot_out.write(frame)
imshow_resized("plot", frame)
new_data = False
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if (time.time() - last_update) > 5 and not new_data:
print("Timeout: Terminating plot")
break
plot_out.release()
cv2.destroyAllWindows()
plot_track_feature_variable(track_plots)
export_data_as_csv(track_plots)
def delete_track_plots():
pass
def plot_track_feature_variable(track_plots):
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.set_xlabel('Frame Number')
ax.set_ylabel('Track Feature Variable Xj')
for track_plot in track_plots:
# Check that there's track feature variable data at all
if track_plot.track_feature_variable.size != 0:
ax.plot(track_plot.frameNos[2:], track_plot.track_feature_variable)
ax.text(track_plot.frameNos[-1], track_plot.track_feature_variable[-1],
f"{track_plot.id}")
plt.show()
def export_data_as_csv(track_plots):
data = []
max_frame = 0
for track_plot in track_plots:
# Check that there's track feature variable data at all
if track_plot.track_feature_variable.size != 0:
# Check if the data has enough rows to accommodate the data
if track_plot.frameNos[-1] > max_frame:
# Add the required number of extra rows
data.extend([[i] for i in range(max_frame, track_plot.frameNos[-1] + 1)])
max_frame = track_plot.frameNos[-1]
for idx, frame in enumerate(track_plot.frameNos):
# Track feature variable is only calculated on the 3rd frame
if idx >= 2:
data[frame - 1].extend([track_plot.id,
track_plot.xs[idx],
track_plot.ys[idx],
track_plot.track_feature_variable[idx - 2]])
with open('../data/data_out.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
for line in data:
writer.writerow(line)
def rgb_to_hex(r, g, b):
return '#%02x%02x%02x' % (r, g, b)
def scalar_to_rgb(scalar_value, max_value):
f = scalar_value / max_value
a = (1 - f) * 5
x = math.floor(a)
y = math.floor(255 * (a - x))
if x == 0:
return (255, y, 0)
elif x == 1:
return (255, 255, 0)
elif x == 2:
return (0, 255, y)
elif x == 3:
return (0, 255, 255)
elif x == 4:
return (y, 0, 255)
else: # x == 5:
return (255, 0, 255)
if __name__ == "__main__":
plot_tracks_realtime("00214_Trim.mp4")
|
africa.py
|
# import package
import africastalking
from django.shortcuts import redirect
from . gateway import AfricasTalkingGateway, AfricasTalkingGatewayException
from .models import Event
from multiprocessing import Process
import schedule
import time
import datetime as dt
def sms(request):
print('smsing')
phones = []
event = Event.objects.all()
for x in event:
if x.user.profile.phone_number:
numbers = x.user.profile.phone_number
message = x.notes
print(numbers)
phones.append(numbers)
# Initialize SDK
username = "brighton" # use 'sandbox' for development in the test environment
api_key = "18696560f647f921b072950f4ef9e5c24a76e583ec26b6ceb4f2ef6d34403daa" # use your sandbox app API key for development in the test environment
africastalking.initialize(username, api_key)
# Initialize a service e.g. SMS
sms = africastalking.SMS
# Use the service synchronously
response = sms.send(message, phones)
print(response)
# Or use it asynchronously
def on_finish(error, response):
if error is not None:
raise error
print(response)
#
# sms.send("Hello Message!", ["+2547xxxxxx"], callback=on_finish)
return redirect('home')
def waiter():
while True:
        print('checking scheduled events...')
now = dt.datetime.now().strftime('%H:%M:%S')
event = Event.objects.all()
for x in event:
            print('event starts at {}'.format(x.start_time))
            if str(x.start_time) == now:
                sms(x)
                print('event start time reached; reminder SMS sent')
continue
else:
                print('event start time not reached yet')
continue
Process(target=waiter).start()
|
pydoc.py
|
#!/usr/bin/env python
# -*- coding: Latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://www.python.org/doc/current/lib/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 43347 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from collections import deque
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
if _re_stripid.search(repr(Exception)):
return _re_stripid.sub(r'\1', text)
return text
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
if name in ['__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__']: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ['.py', '.pyc', '.pyo']:
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (0, None))
if lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = split(module.__doc__ or '', '\n')[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, (exc, value, tb)):
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and \
split(lower(str(value)))[:2] == ['no', 'module']:
# The module was not found.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
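# Example (illustrative): safeimport('xml.dom') imports the package and returns
# the xml.dom submodule rather than the top-level xml package, while
# safeimport('no_such_module') returns None instead of raising ImportError.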
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://www.python.org/doc/current/lib")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages'))))):
htmlfile = "module-%s.html" % object.__name__
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), htmlfile)
else:
docloc = os.path.join(docloc, htmlfile)
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''
<!doctype html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
        """Format a section with a heading."""
        if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
        result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)/cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100/cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, (name, path, ispackage, shadowed)):
"""Make a link for a module or package to display in an index."""
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/peps/pep-%04d.html' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
modnames = []
for file in os.listdir(object.__path__[0]):
path = os.path.join(object.__path__[0], file)
modname = inspect.getmodulename(file)
if modname != '__init__':
if modname and modname not in modnames:
modpkgs.append((modname, name, 0, 0))
modnames.append(modname)
elif ispackage(path):
modpkgs.append((file, name, 1, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda (key, value), s=self: s.modulelink(value))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda (key, value): value, classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name), name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spillproperties(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docproperty(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda (name, kind, cls, value): visiblename(name),
inspect.classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
value = getattr(object, key)
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
attrs.sort(key=lambda t: t[0])
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spillproperties('Properties %s' % tag, attrs,
lambda t: t[1] == 'property')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br> </tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docproperty(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
for attr, tag in [('fget', '<em>get</em>'),
('fset', '<em>set</em>'),
('fdel', '<em>delete</em>')]:
func = getattr(value, attr)
if func is not None:
base = self.document(func, tag, mod)
push('<dd>%s</dd>\n' % base)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docproperty(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
seen = {}
files = os.listdir(dir)
def found(name, ispackage,
modpkgs=modpkgs, shadowed=shadowed, seen=seen):
if name not in seen:
modpkgs.append((name, '', ispackage, name in shadowed))
seen[name] = 1
shadowed[name] = 1
# Package spam/__init__.py takes precedence over module spam.py.
for file in files:
path = os.path.join(dir, file)
if ispackage(path): found(file, 1)
for file in files:
path = os.path.join(dir, file)
if os.path.isfile(path):
modname = inspect.getmodulename(file)
if modname: found(modname, 0)
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
if hasattr(object, '__path__'):
modpkgs = []
for file in os.listdir(object.__path__[0]):
path = os.path.join(object.__path__[0], file)
modname = inspect.getmodulename(file)
if modname != '__init__':
if modname and modname not in modpkgs:
modpkgs.append(modname)
elif ispackage(path):
modpkgs.append(file + ' (package)')
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
if classes:
classlist = map(lambda (key, value): value, classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
return result
def docclass(self, object, name=None, mod=None):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name),
name, mod, object))
return attrs
def spillproperties(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docproperty(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda (name, kind, cls, value): visiblename(name),
inspect.classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
filter(lambda t: not t[0].startswith('_'), attrs)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spillproperties("Properties %s:\n" % tag, attrs,
lambda t: t[1] == 'property')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docproperty(self, name, value, mod):
results = []
push = results.append
if name:
push(name)
need_blank_after_doc = 0
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
need_blank_after_doc = 1
for attr, tag in [('fget', '<get>'),
('fset', '<set>'),
('fdel', '<delete>')]:
func = getattr(value, attr)
if func is not None:
if need_blank_after_doc:
push('')
need_blank_after_doc = 0
base = self.document(func, tag, mod)
push(self.indent(base))
return '\n'.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docproperty(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if os.environ.get('TERM') in ['dumb', 'emacs']:
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ['dumb', 'emacs']:
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more %s' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' ' + filename)
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = split(plain(text), '\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
r = inc = os.environ.get('LINES', 25) - 1
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ['q', 'Q']:
sys.stdout.write('\r \r')
break
elif c in ['\r', '\n']:
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ['b', 'B', '\x1b']:
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
for part in parts[n:]:
try: object = getattr(object, part)
except AttributeError: return None
return object
else:
if hasattr(__builtin__, path):
return getattr(__builtin__, path)
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
return thing, getattr(thing, '__name__', None)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
pager(title % desc + '\n\n' + text.document(object, name))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for file in os.listdir(dir):
path = os.path.join(dir, file)
if ispackage(path):
writedocs(path, pkgpath + file + '.', done)
elif os.path.isfile(path):
modname = inspect.getmodulename(path)
if modname:
if modname == '__init__':
modname = pkgpath[:-1] # remove trailing period
else:
modname = pkgpath + modname
if modname not in done:
done[modname] = 1
writedoc(modname)
class Helper:
keywords = {
'and': 'BOOLEAN',
'assert': ('ref/assert', ''),
'break': ('ref/break', 'while for'),
'class': ('ref/class', 'CLASSES SPECIALMETHODS'),
'continue': ('ref/continue', 'while for'),
'def': ('ref/function', ''),
'del': ('ref/del', 'BASICMETHODS'),
'elif': 'if',
'else': ('ref/if', 'while for'),
'except': 'try',
'exec': ('ref/exec', ''),
'finally': 'try',
'for': ('ref/for', 'break continue while'),
'from': 'import',
'global': ('ref/global', 'NAMESPACES'),
'if': ('ref/if', 'TRUTHVALUE'),
'import': ('ref/import', 'MODULES'),
'in': ('ref/comparisons', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('ref/lambdas', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('ref/pass', ''),
'print': ('ref/print', ''),
'raise': ('ref/raise', 'EXCEPTIONS'),
'return': ('ref/return', 'FUNCTIONS'),
'try': ('ref/try', 'EXCEPTIONS'),
'while': ('ref/while', 'break continue if TRUTHVALUE'),
'yield': ('ref/yield', ''),
}
topics = {
'TYPES': ('ref/types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('ref/strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING TYPES'),
'STRINGMETHODS': ('lib/string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('lib/typesseq-strings', 'OPERATORS'),
'UNICODE': ('ref/strings', 'encodings unicode SEQUENCES STRINGMETHODS FORMATTING TYPES'),
'NUMBERS': ('ref/numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('ref/integers', 'int range'),
'FLOAT': ('ref/floating', 'float math'),
'COMPLEX': ('ref/imaginary', 'complex cmath'),
'SEQUENCES': ('lib/typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('lib/typesfunctions', 'def TYPES'),
'METHODS': ('lib/typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('lib/bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('lib/bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('lib/bltin-null-object', ''),
'ELLIPSIS': ('lib/bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('lib/bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('lib/specialattrs', ''),
'CLASSES': ('ref/types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('lib/typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('ref/summary', 'lambda or and not in is BOOLEAN COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('ref/objects', 'TYPES'),
'SPECIALMETHODS': ('ref/specialnames', 'BASICMETHODS ATTRIBUTEMETHODS CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('ref/customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('ref/attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('ref/callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('ref/sequence-types', 'SEQUENCES SEQUENCEMETHODS2 SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('ref/sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 SPECIALMETHODS'),
'MAPPINGMETHODS': ('ref/sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('ref/numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT SPECIALMETHODS'),
'EXECUTION': ('ref/execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('ref/naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('ref/dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('ref/exceptions', 'try except finally raise'),
'COERCIONS': ('ref/coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('ref/conversions', 'COERCIONS'),
'IDENTIFIERS': ('ref/identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('ref/id-classes', ''),
'PRIVATENAMES': ('ref/atom-identifiers', ''),
'LITERALS': ('ref/atom-literals', 'STRINGS BACKQUOTES NUMBERS TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('ref/exprlists', 'TUPLES LITERALS'),
'LISTS': ('lib/typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('ref/lists', 'LISTS LITERALS'),
'DICTIONARIES': ('lib/typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('ref/dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('ref/string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('ref/attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('ref/subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('ref/slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('ref/calls', 'EXPRESSIONS'),
'POWER': ('ref/power', 'EXPRESSIONS'),
'UNARY': ('ref/unary', 'EXPRESSIONS'),
'BINARY': ('ref/binary', 'EXPRESSIONS'),
'SHIFTING': ('ref/shifting', 'EXPRESSIONS'),
'BITWISE': ('ref/bitwise', 'EXPRESSIONS'),
'COMPARISON': ('ref/comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('ref/Booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('ref/assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('ref/augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('ref/compound', 'for while break continue'),
'TRUTHVALUE': ('lib/truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('lib/module-pdb', 'pdb'),
}
def __init__(self, input, output):
self.input = input
self.output = output
self.docdir = None
execdir = os.path.dirname(sys.executable)
homedir = os.environ.get('PYTHONHOME')
for dir in [os.environ.get('PYTHONDOCS'),
homedir and os.path.join(homedir, 'doc'),
os.path.join(execdir, 'doc'),
'/usr/doc/python-docs-' + split(sys.version)[0],
'/usr/doc/python-' + split(sys.version)[0],
'/usr/doc/python-docs-' + sys.version[:3],
'/usr/doc/python-' + sys.version[:3],
os.path.join(sys.prefix, 'Resources/English.lproj/Documentation')]:
if dir and os.path.isdir(os.path.join(dir, 'lib')):
self.docdir = dir
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
def __call__(self, request=None):
if request is not None:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ['q', 'quit']: break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://www.python.org/doc/tut/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic):
if not self.docdir:
self.output.write('''
Sorry, topic and keyword documentation is not available because the Python
HTML documentation files could not be found. If you have installed them,
please set the environment variable PYTHONDOCS to indicate their location.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target)
filename, xrefs = target
filename = self.docdir + '/' + filename + '.html'
try:
file = open(filename)
except:
self.output.write('could not read docs from %s\n' % filename)
return
divpat = re.compile('<div[^>]*navigat.*?</div.*?>', re.I | re.S)
addrpat = re.compile('<address.*?>.*?</address.*?>', re.I | re.S)
document = re.sub(addrpat, '', re.sub(divpat, '', file.read()))
file.close()
import htmllib, formatter, StringIO
buffer = StringIO.StringIO()
parser = htmllib.HTMLParser(
formatter.AbstractFormatter(formatter.DumbWriter(buffer)))
parser.start_table = parser.do_p
parser.end_table = lambda parser=parser: parser.do_p({})
parser.start_tr = parser.do_br
parser.start_td = parser.start_th = lambda a, b=buffer: b.write('\t')
parser.feed(document)
buffer = replace(buffer.getvalue(), '\xa0', ' ', '\n', '\n ')
pager(' ' + strip(buffer) + '\n')
if xrefs:
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
ModuleScanner().run(callback)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper(sys.stdin, sys.stdout)
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner(Scanner):
"""An interruptible scanner that searches module synopses."""
def __init__(self):
roots = map(lambda dir: (dir, ''), pathdirs())
Scanner.__init__(self, roots, self.submodules, self.isnewpackage)
self.inodes = map(lambda (dir, pkg): os.stat(dir).st_ino, roots)
def submodules(self, (dir, package)):
children = []
for file in os.listdir(dir):
path = os.path.join(dir, file)
if ispackage(path):
children.append((path, package + (package and '.') + file))
else:
children.append((path, package))
children.sort() # so that spam.py comes before spam.pyc or spam.pyo
return children
def isnewpackage(self, (dir, package)):
inode = os.path.exists(dir) and os.stat(dir).st_ino
if not (os.path.islink(dir) and inode in self.inodes):
self.inodes.append(inode) # detect circular symbolic links
return ispackage(dir)
return False
def run(self, callback, key=None, completer=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
while not self.quit:
node = self.next()
if not node: break
path, package = node
modname = inspect.getmodulename(path)
if os.path.isfile(path) and modname:
modname = package + (package and '.') + modname
if not modname in seen:
seen[modname] = 1 # if we see spam.py, skip spam.pyc
if key is None:
callback(path, modname, '')
else:
desc = synopsis(path) or ''
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer: completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
try: import warnings
except ImportError: pass
else: warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in pathdirs():
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
self.address = ('', port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
elif sys.platform == 'mac':
try: import ic
except ImportError: pass
else: ic.launchurl(url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
# file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default.
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
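# --- Editor's note: a small usage sketch, not part of the original pydoc.py. ---
# The help text printed by cli() above describes the command-line interface; the
# same entry points can also be called directly from Python 2 code. The module
# name `pydoc` is assumed here, since this file is the standard library module.
def _pydoc_usage_example():
    import pydoc
    # Page text documentation, equivalent to `pydoc os.path` on the command line.
    pydoc.doc('os.path')
    # Write os.path.html into the current directory, equivalent to `pydoc -w os.path`.
    pydoc.writedoc('os.path')
    # Print one-line synopses of modules mentioning "http", equivalent to `pydoc -k http`.
    pydoc.apropos('http')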
|
watchdog.py
|
# -*- coding: utf-8 -*-
from kazoo.client import KazooClient
import os
import sys
import logging
import time
import signal
from multiprocessing import Process
main_dir = "/root/V3/project/"
signal_dir = '/signal/hexunblog'
task_type = "hexunblog"
def run_proc():
# Switch to the spider directory and replace this process with the scrapy crawl.
os.chdir(main_dir +"hexunblog/hexunblog/spiders")
#arg = ["HELLO","crawl", "spider_" + task_type,"--nolog"]
arg = ["HELLO","crawl", "spider_" + task_type]
os.execvp("scrapy",arg)
def run_wait(a,b):
# SIGCHLD handler: reap any finished child process without blocking.
try:
os.waitpid(-1, os.WNOHANG)
except Exception,e:
print "no child"
signal.signal(signal.SIGCHLD, run_wait)
watchPid = []
for i in range(1,len(sys.argv)):
watchPid.append(int(sys.argv[i]))
hosts_list = ['123.206.89.123:2181', '123.207.157.135:2181', '118.89.234.46:2181']
signal_dic = {"stop":signal.SIGKILL, "start":signal.SIGCONT, "pause":signal.SIGSTOP, "continue":signal.SIGCONT}
zk = KazooClient(hosts = hosts_list)
logging.basicConfig()
zk.start()
print "watch dog working"
stop_flag = False
@zk.ChildrenWatch(signal_dir)
def signal_watch(children):
# A child znode named "stop", "start", "pause" or "continue" maps to a POSIX
# signal that is forwarded to every watched PID; "stop" also ends this watchdog.
if len(children) != 0:
global watchPid
for pid in watchPid:
os.kill(pid, signal_dic[children[0]])
if children[0] == "stop":
global stop_flag
stop_flag = True
def check(pid):
global stop_flag
if stop_flag == True:
sys.exit(0)
try:
os.kill(pid, 0)
return pid
    except Exception:  # the process is no longer alive; restart the spider
p = Process(target=run_proc)
p.start()
return p.pid
while True:
print "begin check"
global stop_flag
if stop_flag == True:
sys.exit(0)
    for pid in list(watchPid):  # iterate over a copy, since watchPid is modified inside the loop
newpid = check(pid)
if stop_flag == True:
sys.exit(0)
if newpid != pid:
print "new process"
watchPid.remove(pid)
watchPid.append(newpid)
time.sleep(5)
|
onevent.py
|
#!/usr/bin/python3
import paho.mqtt.client as mqtt
import os
import subprocess
import urllib.request
from shutil import copyfile
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import argparse
import youtube.get_music_video_url
from multiprocessing import Process
base_path = '/home/pi/scripts/github/media_frame/data/music/'
player = None
def get_music_video_path(track_information, music_videos_path):
for f in os.listdir(music_videos_path):
if track_information in f:
return music_videos_path + f
return None
def download_video(url, music_videos_path, track_information):
# youtube-dl -f worst -o "Meshuggah - Clockworks.%(ext)s" https://www.youtube.com/watch?v=oFiDcazicdk
# download_cmd = '/home/pi/.local/bin/youtube-dl -f best -o "' + music_videos_path + track_information + '.%(ext)s" "' + url + '"&';
max_height = str(768)
download_cmd = '/home/pi/.local/bin/youtube-dl -f \'bestvideo[height<=' + max_height + ']+bestaudio/best[height<=' + max_height + ']\' -o "' + music_videos_path + track_information + '.%(ext)s" "' + url + '"&';
print('executing: "' + download_cmd + '"')
os.system(download_cmd)
def play_video(music_video_path):
volume = 0.0
try:
volume = float(execute('/usr/bin/playerctl --player=' + player + ' volume'))
if volume > 1.0:
volume /= 100
except Exception as e:
print(e)
print(player + ' volume: ' + str(volume))
os.system('/usr/bin/playerctl --player=' + player + ' pause')
# vlc_cmd = '/home/pi/.local/bin/youtube-dl -f worst -o - "' + url + '" | /usr/bin/vlc -f --play-and-exit -'
vlc_cmd = '/usr/bin/vlc -f --play-and-exit "' + music_video_path + '"'
next_cmd = '/usr/bin/playerctl --player=' + player + ' next'
print('vlc_cmd: ' + vlc_cmd)
os.system(vlc_cmd + '; ' + next_cmd + '&')
# quit() # no need to check for available download
def check_for_music_video(track_information):
play_youtube_videos = os.path.isfile(base_path + 'play_youtube_videos')
download_youtube_videos = os.path.isfile(base_path + 'download_youtube_videos')
play_youtube_videos = False
download_youtube_videos = True
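    # NOTE: the two hard-coded assignments above override the file-based switches read just before them.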
music_videos_path = base_path + 'music_videos/'
music_video_path = get_music_video_path(track_information, music_videos_path)
if play_youtube_videos:
if music_video_path is None:
print('music video not found on local disk')
else:
print('music video found on local disk')
print(music_video_path)
process = Process(target=play_video, args=(music_video_path, ))
process.start()
if download_youtube_videos:
if music_video_path is None:
url = youtube.get_music_video_url.get_url(track_information)
if url:
print('music video url: ' + url)
process = Process(target=download_video, args=(url, music_videos_path, track_information, ))
process.start()
else:
print('no music video found on youtube')
else:
print('music video already downloaded!')
print(music_video_path)
def main():
print('onevent!')
global player
parser = argparse.ArgumentParser(description='onevent')
parser.add_argument('player', nargs=1, help='player (playerctl -l)')
args = parser.parse_args()
player = args.player[0]
if player == None:
print('No player given!')
quit()
else:
print('Player: ' + player)
state, track_information = get_track_information_playerctl()
print('track_information:', state, track_information)
if state != 'pause':
path = base_path + 'current_track.txt'
previous_track = None
if os.path.isfile(path):
with open(path, 'r') as f:
previous_track = f.read()
if previous_track != track_information:
f = open(path, 'w')
f.write(track_information)
f.close()
artwork_url = execute('/usr/bin/playerctl --player=' + player + ' metadata --format "{{ mpris:artUrl }}"')
pic_dir = base_path + 'artwork/'
if player == 'spotifyd':
spotifyd(artwork_url, pic_dir)
elif player == 'ShairportSync':
shairport(artwork_url, pic_dir, track_information)
check_for_music_video(track_information)
else:
print('paused')
print('changing PictureFrame to photos')
os.system('/home/pi/scripts/github/media_frame/scripts/change_media_to_photos.sh')
def spotifyd(artwork_url, pic_dir):
artwork_filename = artwork_url.split('/')[-1] + '.jpeg'
new_artwork_path = os.path.join(pic_dir, artwork_filename)
urllib.request.urlretrieve(artwork_url, new_artwork_path)
remove_old_artworks(new_artwork_path)
frame_next(player)
def shairport(artwork_url, pic_dir, track_information):
if 'file://' in artwork_url and os.path.isfile(artwork_url[7:]):
artwork_filename = artwork_url.split('/')[-1] + '.jpeg'
new_artwork_path = os.path.join(pic_dir, artwork_filename)
urllib.request.urlretrieve(artwork_url, new_artwork_path)
remove_old_artworks(new_artwork_path)
else:
artwork_url = get_artwork_url(track_information)
if artwork_url != None:
artwork_filename = artwork_url.split('/')[-1] + '.jpeg'
new_artwork_path = base_path + 'artwork/' + artwork_filename
remove_old_artworks()
urllib.request.urlretrieve(artwork_url, new_artwork_path)
frame_next(player + ' artwork')
else:
remove_old_artworks()
copyfile(base_path + 'default.jpg', base_path + 'artwork/default.jpg')
frame_next('default artwork')
frame_next(player)
def get_artists(artists):
artist = ''
for i in range(0, len(artists)):
artist += artists[i]['name']
if i != len(artists) - 1:
            artist += ', '  # separate artist names with a comma
return artist
def get_artwork_url(track_information):
ti_split = track_information.split(' - ')
if len(ti_split) == 2:
search_artist = ti_split[0]
search_title = ti_split[1]
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(client_id='14e48d315bb649dba1a37ce4c764f58c', client_secret='ba72e489deb8442b90263689df69f8fb'))
result = sp.search(search_artist + ' ' + search_title)
rresult = result['tracks']['items']
for r in rresult:
if r['name'] == search_title and get_artists(r['artists']) == search_artist:
biggest_size_index = -1
images = r['album']['images']
biggest_size = -1
for i in range(0, len(images)):
image = images[i]
if image['height'] > biggest_size:
biggest_size = int(image['height'])
biggest_size_index = i
if biggest_size_index != -1:
return images[biggest_size_index]['url']
return None
def get_track_information_playerctl():
playerctl_state = execute('/usr/bin/playerctl --player=' + player + ' status')
state = None
if playerctl_state == 'Paused':
state = 'pause'
elif playerctl_state == 'Playing':
state = 'play'
else:
state = 'pause'
print('Error at receiving status, assuming paused state')
return state, ''
# quit()
track_information = execute('/usr/bin/playerctl --player=' + player + ' metadata --format "{{ artist }} - {{ title }}"')
return state, track_information
def remove_old_artworks(exceptFile = None):
pic_dir = base_path + 'artwork/'
files_to_remove = []
for f in os.listdir(pic_dir):
full_path = os.path.join(pic_dir, f)
if os.path.isfile(full_path):
if exceptFile is None or full_path != exceptFile:
files_to_remove.append(full_path)
for path in files_to_remove:
os.remove(path)
def execute(command):
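    # Run the command through the shell and return its combined stdout/stderr as one string with newlines stripped.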
result = subprocess.run([command], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
return result.stdout.decode('utf-8').replace('\n', '')
def frame_next(info = ''):
if os.path.isfile(base_path + 'is_active'):
client = mqtt.Client()
client.connect("localhost", 1883, 60)
client.publish("frame/next")
client.disconnect()
print('frame/next ' + info)
else:
print('frame/next ' + info)
print('changing PictureFrame to music')
os.system('/home/pi/scripts/github/media_frame/scripts/change_media_to_music.sh')
if __name__ == '__main__':
main()
# playerctl --player=spotifyd metadata --format "{{ artist }} - {{ title }} | {{ mpris:artUrl }}"
# playerctl --player=spotifyd metadata --format "{{ artist }} - {{ title }}" > /home/pi/scripts/github/media_frame/data/music/current_track.txt
|
ServerPlugin.py
|
__author__ = 'Scott Davey'
"""Simple tyre temperature plugin for Assetto Corsa"""
import sys
import ac
import acsys
import traceback
sys.path.insert(0, 'apps/python/ServerPlugin/ServerPlugin_lib/stdlib')
try:
import socketserver
import threading
from ServerPlugin_lib.UDPServer import UDPServer
except Exception as e:
ac.log("{}".format(traceback.format_exc()))
APP_NAME = "ACServer"
statusLabel = 0
server = None
def log(txt):
ac.log("ACServer: " + txt)
def acMain(ac_version):
global server
try:
appWindow = ac.newApp(APP_NAME)
ac.setSize(appWindow, 200, 200)
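        # Serve requests over UDP on localhost:18149 from a background thread;
        # UDPServer from ServerPlugin_lib acts as the request handler class.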
server = socketserver.UDPServer(('localhost', 18149), UDPServer)
threading.Thread(target=server.serve_forever).start()
setupUI(appWindow)
log("Startup Complete")
except Exception as e:
log("ERROR: {}".format(e))
return APP_NAME
def setupUI(appWindow):
global statusLabel
    statusLabel = ac.addLabel(appWindow, "Disconnected")
ac.setFontColor(statusLabel, 255, 0, 0, 0)
ac.setPosition(statusLabel, 3, 30)
def acShutdown():
global server
server.shutdown()
|
radosBindings.py
|
import rados
import re
import os
import time
import threading
import signal
CONF_DIR = '/home/kostis/git/django/ceph/demo/utils'
CEPH_CONF = os.path.join(CONF_DIR, 'ceph.conf')
KEYRING = os.path.join(CONF_DIR, 'ceph.client.admin.keyring')
POOL = 'data'
TIMEOUT = 2
cluster = None
def _connect():
global cluster
try:
new_cluster = rados.Rados(conffile=CEPH_CONF, conf=dict(keyring=KEYRING))
new_cluster.connect(timeout=TIMEOUT)
cluster = new_cluster
print "*** Connection Established ***"
except:
try:
new_cluster.shutdown()
except:
pass
finally:
cluster = None
print "*** Could not establish connection ***"
def _test_connection():
try:
test_conn = rados.Rados(conffile=CEPH_CONF, conf=dict(keyring=KEYRING))
test_conn.connect(timeout=TIMEOUT)
print "*** Connection OK ***"
return True
except:
print "*** Connection FAILED ***"
return False
finally:
try:
test_conn.shutdown()
except:
pass
def _maintain_connection():
global cluster
while True:
time.sleep(20)
if _test_connection():
if cluster is None:
_connect()
else:
if cluster is not None:
try:
cluster.shutdown()
except:
pass
finally:
cluster = None
print "*** Shut down previous connection ***"
def connected(ret_type):
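    # Decorator factory: if the wrapped call raises (e.g. because the cluster connection is down),
    # return an empty ret_type() instead of propagating the error.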
def decorator(f):
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return ret_type()
return wrapped
return decorator
@connected(ret_type=list)
def get_object_list(user):
ioctx = cluster.open_ioctx(POOL)
objects = []
for obj in ioctx.list_objects(): # this looks REALLY bad :)
obj_owner, obj_name = _parse_object_name(obj)
if obj_owner == user:
objects.append(obj_name)
return objects
@connected(ret_type=str)
def get_data(user, obj):
obj_name = create_object_name(user, obj)
ioctx = cluster.open_ioctx(POOL)
return ioctx.read(obj_name)
@connected(ret_type=bool)
def delete_object(user, obj):
obj_name = create_object_name(user, obj)
ioctx = cluster.open_ioctx(POOL)
ioctx.remove_object(obj_name)
return True
@connected(ret_type=bool)
def store_object(user, name, data):
obj_name = create_object_name(user, name)
ioctx = cluster.open_ioctx(POOL)
ioctx.write_full(obj_name, str(data))
return True
def _parse_object_name(obj_name):
return obj_name.key.split(';')
def create_object_name(user, name):
return str("%s;%s" % (user, name))
def exists(user, name):
return name in get_object_list(user)
def is_valid_name(name):
return bool(re.match(r'^[a-zA-Z0-9\-]+$', name))
# should probably never be used
def startup_cluster():
from subprocess import call
call(['start-ceph'])
_connect()
threading.Thread(target=_maintain_connection).start()
|
s3.py
|
"""
Object Store plugin for the Amazon Simple Storage Service (S3)
"""
import logging
import multiprocessing
import os
import shutil
import subprocess
import threading
import time
from datetime import datetime
from galaxy.exceptions import ObjectNotFound
from galaxy.util import umask_fix_perms
from galaxy.util.directory_hash import directory_hash_id
from galaxy.util.sleeper import Sleeper
from .s3_multipart_upload import multipart_upload
from ..objectstore import ObjectStore, convert_bytes
try:
import boto
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
except ImportError:
boto = None
NO_BOTO_ERROR_MESSAGE = "S3/Swift object store configured, but no boto dependency available. Please install and properly configure boto or modify object store configuration."
log = logging.getLogger( __name__ )
logging.getLogger('boto').setLevel(logging.INFO) # Otherwise boto is quite noisy
class S3ObjectStore(ObjectStore):
"""
Object store that stores objects as items in an AWS S3 bucket. A local
cache exists that is used as an intermediate location for files between
Galaxy and S3.
"""
def __init__(self, config, config_xml):
if boto is None:
raise Exception(NO_BOTO_ERROR_MESSAGE)
super(S3ObjectStore, self).__init__(config, config_xml)
self.config = config
self.staging_path = self.config.file_path
self.transfer_progress = 0
self._parse_config_xml(config_xml)
self._configure_connection()
self.bucket = self._get_bucket(self.bucket)
# Clean cache only if value is set in galaxy.ini
if self.cache_size != -1:
# Convert GBs to bytes for comparison
self.cache_size = self.cache_size * 1073741824
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
self.cache_monitor_thread.start()
log.info("Cache cleaner manager started")
# Test if 'axel' is available for parallel download and pull the key into cache
try:
subprocess.call('axel')
self.use_axel = True
except OSError:
self.use_axel = False
def _configure_connection(self):
log.debug("Configuring S3 Connection")
self.conn = S3Connection(self.access_key, self.secret_key)
def _parse_config_xml(self, config_xml):
try:
a_xml = config_xml.findall('auth')[0]
self.access_key = a_xml.get('access_key')
self.secret_key = a_xml.get('secret_key')
b_xml = config_xml.findall('bucket')[0]
self.bucket = b_xml.get('name')
self.use_rr = b_xml.get('use_reduced_redundancy', False)
cn_xml = config_xml.findall('connection')[0]
self.host = cn_xml.get('host', None)
self.port = int(cn_xml.get('port', 6000))
self.is_secure = cn_xml.get('is_secure', True)
self.conn_path = cn_xml.get('conn_path', '/')
c_xml = config_xml.findall('cache')[0]
self.cache_size = float(c_xml.get('size', -1))
self.cache_path = c_xml.get('path')
except Exception:
# Toss it back up after logging, we can't continue loading at this point.
log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
raise
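    # Rough sketch of the config_xml layout that _parse_config_xml expects, inferred from the
    # findall()/get() calls above (element and attribute names here are assumptions; an actual
    # Galaxy object store configuration may differ):
    #
    #   <object_store type="s3">
    #       <auth access_key="..." secret_key="..." />
    #       <bucket name="my_bucket" use_reduced_redundancy="False" />
    #       <connection host="" port="6000" is_secure="True" conn_path="/" />
    #       <cache path="database/object_store_cache" size="100" />
    #   </object_store>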
def __cache_monitor(self):
time.sleep(2) # Wait for things to load before starting the monitor
while self.running:
total_size = 0
# Is this going to be too expensive of an operation to be done frequently?
file_list = []
for dirpath, dirnames, filenames in os.walk(self.staging_path):
for f in filenames:
fp = os.path.join(dirpath, f)
file_size = os.path.getsize(fp)
total_size += file_size
# Get the time given file was last accessed
last_access_time = time.localtime(os.stat(fp)[7])
# Compose a tuple of the access time and the file path
file_tuple = last_access_time, fp, file_size
file_list.append(file_tuple)
# Sort the file list (based on access time)
file_list.sort()
# Initiate cleaning once within 10% of the defined cache size?
cache_limit = self.cache_size * 0.9
if total_size > cache_limit:
log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s"
% (convert_bytes(total_size), convert_bytes(cache_limit)))
# How much to delete? If simply deleting up to the cache-10% limit,
# is likely to be deleting frequently and may run the risk of hitting
# the limit - maybe delete additional #%?
# For now, delete enough to leave at least 10% of the total cache free
delete_this_much = total_size - cache_limit
self.__clean_cache(file_list, delete_this_much)
self.sleeper.sleep(30) # Test cache size every 30 seconds?
def __clean_cache(self, file_list, delete_this_much):
""" Keep deleting files from the file_list until the size of the deleted
files is greater than the value in delete_this_much parameter.
:type file_list: list
:param file_list: List of candidate files that can be deleted. This method
will start deleting files from the beginning of the list so the list
should be sorted accordingly. The list must contains 3-element tuples,
positioned as follows: position 0 holds file last accessed timestamp
(as time.struct_time), position 1 holds file path, and position 2 has
            file size (e.g., (<access time>, '/mnt/data/dataset_1.dat', 472394))
:type delete_this_much: int
:param delete_this_much: Total size of files, in bytes, that should be deleted.
"""
# Keep deleting datasets from file_list until deleted_amount does not
# exceed delete_this_much; start deleting from the front of the file list,
# which assumes the oldest files come first on the list.
deleted_amount = 0
for i, f in enumerate(file_list):
if deleted_amount < delete_this_much:
deleted_amount += f[2]
os.remove(f[1])
# Debugging code for printing deleted files' stats
# folder, file_name = os.path.split(f[1])
# file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
# log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \
# % (i, file_name, convert_bytes(f[2]), file_date, \
# convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
else:
log.debug("Cache cleaning done. Total space freed: %s" % convert_bytes(deleted_amount))
return
def _get_bucket(self, bucket_name):
""" Sometimes a handle to a bucket is not established right away so try
it a few times. Raise error is connection is not established. """
for i in range(5):
try:
bucket = self.conn.get_bucket(bucket_name)
log.debug("Using cloud object store with bucket '%s'" % bucket.name)
return bucket
except S3ResponseError:
log.debug("Could not get bucket '%s', attempt %s/5" % (bucket_name, i + 1))
time.sleep(2)
# All the attempts have been exhausted and connection was not established,
# raise error
raise S3ResponseError
def _fix_permissions(self, rel_path):
""" Set permissions on rel_path"""
for basedir, dirs, files in os.walk(rel_path):
umask_fix_perms(basedir, self.config.umask, 0777, self.config.gid)
for f in files:
path = os.path.join(basedir, f)
# Ignore symlinks
if os.path.islink(path):
continue
umask_fix_perms( path, self.config.umask, 0666, self.config.gid )
def _construct_path(self, obj, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, **kwargs):
rel_path = os.path.join(*directory_hash_id(obj.id))
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# S3 folders are marked by having trailing '/' so add it now
rel_path = '%s/' % rel_path
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return rel_path
def _get_cache_path(self, rel_path):
return os.path.abspath(os.path.join(self.staging_path, rel_path))
def _get_transfer_progress(self):
return self.transfer_progress
def _get_size_in_s3(self, rel_path):
try:
key = self.bucket.get_key(rel_path)
if key:
return key.size
except S3ResponseError, ex:
log.error("Could not get size of key '%s' from S3: %s" % (rel_path, ex))
except Exception, ex:
log.error("Could not get reference to the key object '%s'; returning -1 for key size: %s" % (rel_path, ex))
return -1
def _key_exists(self, rel_path):
exists = False
try:
# A hackish way of testing if the rel_path is a folder vs a file
is_dir = rel_path[-1] == '/'
if is_dir:
rs = self.bucket.get_all_keys(prefix=rel_path)
if len(rs) > 0:
exists = True
else:
exists = False
else:
key = Key(self.bucket, rel_path)
exists = key.exists()
except S3ResponseError, ex:
log.error("Trouble checking existence of S3 key '%s': %s" % (rel_path, ex))
return False
        if rel_path[0] == '/':
            # Key names are expected to be relative paths; anything absolute indicates a logic error upstream.
            raise Exception("S3 key name must be relative, not absolute: %s" % rel_path)
return exists
def _in_cache(self, rel_path):
""" Check if the given dataset is in the local cache and return True if so. """
# log.debug("------ Checking cache for rel_path %s" % rel_path)
cache_path = self._get_cache_path(rel_path)
return os.path.exists(cache_path)
# TODO: Part of checking if a file is in cache should be to ensure the
# size of the cached file matches that on S3. Once the upload tool explicitly
# creates, this check sould be implemented- in the mean time, it's not
# looking likely to be implementable reliably.
# if os.path.exists(cache_path):
# # print "***1 %s exists" % cache_path
# if self._key_exists(rel_path):
# # print "***2 %s exists in S3" % rel_path
# # Make sure the size in cache is available in its entirety
# # print "File '%s' cache size: %s, S3 size: %s" % (cache_path, os.path.getsize(cache_path), self._get_size_in_s3(rel_path))
# if os.path.getsize(cache_path) == self._get_size_in_s3(rel_path):
# # print "***2.1 %s exists in S3 and the size is the same as in cache (in_cache=True)" % rel_path
# exists = True
# else:
# # print "***2.2 %s exists but differs in size from cache (in_cache=False)" % cache_path
# exists = False
# else:
# # Although not perfect decision making, this most likely means
# # that the file is currently being uploaded
# # print "***3 %s found in cache but not in S3 (in_cache=True)" % cache_path
# exists = True
# else:
# return False
def _pull_into_cache(self, rel_path):
# Ensure the cache directory structure exists (e.g., dataset_#_files/)
rel_path_dir = os.path.dirname(rel_path)
if not os.path.exists(self._get_cache_path(rel_path_dir)):
os.makedirs(self._get_cache_path(rel_path_dir))
# Now pull in the file
ok = self._download(rel_path)
self._fix_permissions(self._get_cache_path(rel_path_dir))
return ok
def _transfer_cb(self, complete, total):
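        # boto calls this periodically with (bytes transferred, total bytes); only a coarse progress counter is kept here.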
self.transfer_progress += 10
def _download(self, rel_path):
try:
log.debug("Pulling key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
key = self.bucket.get_key(rel_path)
# Test if cache is large enough to hold the new file
if self.cache_size > 0 and key.size > self.cache_size:
log.critical("File %s is larger (%s) than the cache size (%s). Cannot download."
% (rel_path, key.size, self.cache_size))
return False
if self.use_axel:
log.debug("Parallel pulled key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
                ret_code = subprocess.call("axel -a -n %s '%s'" % (ncores, url), shell=True)  # shell=True so the command string is parsed by the shell
if ret_code == 0:
return True
else:
log.debug("Pulled key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
self.transfer_progress = 0 # Reset transfer progress counter
key.get_contents_to_filename(self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10)
return True
except S3ResponseError, ex:
log.error("Problem downloading key '%s' from S3 bucket '%s': %s" % (rel_path, self.bucket.name, ex))
return False
def _push_to_os(self, rel_path, source_file=None, from_string=None):
"""
Push the file pointed to by ``rel_path`` to the object store naming the key
``rel_path``. If ``source_file`` is provided, push that file instead while
still using ``rel_path`` as the key name.
If ``from_string`` is provided, set contents of the file to the value of
the string.
"""
try:
source_file = source_file if source_file else self._get_cache_path(rel_path)
if os.path.exists(source_file):
key = Key(self.bucket, rel_path)
if os.path.getsize(source_file) == 0 and key.exists():
log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping." % (source_file, rel_path))
return True
if from_string:
key.set_contents_from_string(from_string, reduced_redundancy=self.use_rr)
log.debug("Pushed data from string '%s' to key '%s'" % (from_string, rel_path))
else:
start_time = datetime.now()
log.debug("Pushing cache file '%s' of size %s bytes to key '%s'" % (source_file, os.path.getsize(source_file), rel_path))
mb_size = os.path.getsize(source_file) / 1e6
if mb_size < 10 or type(self) == SwiftObjectStore:
self.transfer_progress = 0 # Reset transfer progress counter
key.set_contents_from_filename(source_file,
reduced_redundancy=self.use_rr,
cb=self._transfer_cb,
num_cb=10)
else:
multipart_upload(self.bucket, key.name, source_file, mb_size, self.access_key, self.secret_key, use_rr=self.use_rr)
end_time = datetime.now()
log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)" % (source_file, rel_path, os.path.getsize(source_file), end_time - start_time))
return True
else:
log.error("Tried updating key '%s' from source file '%s', but source file does not exist."
% (rel_path, source_file))
except S3ResponseError, ex:
log.error("Trouble pushing S3 key '%s' from file '%s': %s" % (rel_path, source_file, ex))
return False
def file_ready(self, obj, **kwargs):
"""
A helper method that checks if a file corresponding to a dataset is
ready and available to be used. Return ``True`` if so, ``False`` otherwise.
"""
rel_path = self._construct_path(obj, **kwargs)
# Make sure the size in cache is available in its entirety
if self._in_cache(rel_path):
if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_s3(rel_path):
return True
log.debug("Waiting for dataset {0} to transfer from OS: {1}/{2}".format(rel_path,
os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_s3(rel_path)))
return False
def exists(self, obj, **kwargs):
in_cache = in_s3 = False
rel_path = self._construct_path(obj, **kwargs)
# Check cache
if self._in_cache(rel_path):
in_cache = True
# Check S3
in_s3 = self._key_exists(rel_path)
# log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
# dir_only does not get synced so shortcut the decision
dir_only = kwargs.get('dir_only', False)
if dir_only:
if in_cache or in_s3:
return True
else:
return False
# TODO: Sync should probably not be done here. Add this to an async upload stack?
if in_cache and not in_s3:
self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
return True
elif in_s3:
return True
else:
return False
def create(self, obj, **kwargs):
if not self.exists(obj, **kwargs):
# Pull out locally used fields
extra_dir = kwargs.get('extra_dir', None)
extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
dir_only = kwargs.get('dir_only', False)
alt_name = kwargs.get('alt_name', None)
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj.id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# Create given directory in cache
cache_dir = os.path.join(self.staging_path, rel_path)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Although not really necessary to create S3 folders (because S3 has
# flat namespace), do so for consistency with the regular file system
# S3 folders are marked by having trailing '/' so add it now
# s3_dir = '%s/' % rel_path
# self._push_to_os(s3_dir, from_string='')
# If instructed, create the dataset in cache & in S3
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
open(os.path.join(self.staging_path, rel_path), 'w').close()
self._push_to_os(rel_path, from_string='')
def empty(self, obj, **kwargs):
if self.exists(obj, **kwargs):
return bool(self.size(obj, **kwargs) > 0)
else:
raise ObjectNotFound( 'objectstore.empty, object does not exist: %s, kwargs: %s'
% ( str( obj ), str( kwargs ) ) )
def size(self, obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
if self._in_cache(rel_path):
try:
return os.path.getsize(self._get_cache_path(rel_path))
except OSError, ex:
log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s" % (rel_path, ex))
elif self.exists(obj, **kwargs):
return self._get_size_in_s3(rel_path)
log.warning("Did not find dataset '%s', returning 0 for size" % rel_path)
return 0
def delete(self, obj, entire_dir=False, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
try:
# For the case of extra_files, because we don't have a reference to
# individual files/keys we need to remove the entire directory structure
# with all the files in it. This is easy for the local file system,
# but requires iterating through each individual key in S3 and deleing it.
if entire_dir and extra_dir:
shutil.rmtree(self._get_cache_path(rel_path))
rs = self.bucket.get_all_keys(prefix=rel_path)
for key in rs:
log.debug("Deleting key %s" % key.name)
key.delete()
return True
else:
# Delete from cache first
os.unlink(self._get_cache_path(rel_path))
# Delete from S3 as well
if self._key_exists(rel_path):
key = Key(self.bucket, rel_path)
log.debug("Deleting key %s" % key.name)
key.delete()
return True
except S3ResponseError, ex:
log.error("Could not delete key '%s' from S3: %s" % (rel_path, ex))
except OSError, ex:
log.error('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
return False
def get_data(self, obj, start=0, count=-1, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
# Check cache first and get file if not there
if not self._in_cache(rel_path):
self._pull_into_cache(rel_path)
# Read the file content from cache
data_file = open(self._get_cache_path(rel_path), 'r')
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def get_filename(self, obj, **kwargs):
dir_only = kwargs.get('dir_only', False)
rel_path = self._construct_path(obj, **kwargs)
cache_path = self._get_cache_path(rel_path)
# S3 does not recognize directories as files so cannot check if those exist.
# So, if checking dir only, ensure given dir exists in cache and return
# the expected cache path.
# dir_only = kwargs.get('dir_only', False)
# if dir_only:
# if not os.path.exists(cache_path):
# os.makedirs(cache_path)
# return cache_path
# Check if the file exists in the cache first
if self._in_cache(rel_path):
return cache_path
# Check if the file exists in persistent storage and, if it does, pull it into cache
elif self.exists(obj, **kwargs):
if dir_only: # Directories do not get pulled into cache
return cache_path
else:
if self._pull_into_cache(rel_path):
return cache_path
# For the case of retrieving a directory only, return the expected path
# even if it does not exist.
# if dir_only:
# return cache_path
raise ObjectNotFound( 'objectstore.get_filename, no cache_path: %s, kwargs: %s'
% ( str( obj ), str( kwargs ) ) )
# return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
def update_from_file(self, obj, file_name=None, create=False, **kwargs):
if create:
self.create(obj, **kwargs)
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
# Chose whether to use the dataset file itself or an alternate file
if file_name:
source_file = os.path.abspath(file_name)
# Copy into cache
cache_file = self._get_cache_path(rel_path)
try:
if source_file != cache_file:
# FIXME? Should this be a `move`?
shutil.copy2(source_file, cache_file)
self._fix_permissions(cache_file)
except OSError, ex:
log.error("Trouble copying source file '%s' to cache '%s': %s" % (source_file, cache_file, ex))
else:
source_file = self._get_cache_path(rel_path)
# Update the file on S3
self._push_to_os(rel_path, source_file)
else:
raise ObjectNotFound( 'objectstore.update_from_file, object does not exist: %s, kwargs: %s'
% ( str( obj ), str( kwargs ) ) )
def get_object_url(self, obj, **kwargs):
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
try:
key = Key(self.bucket, rel_path)
return key.generate_url(expires_in=86400) # 24hrs
except S3ResponseError, ex:
log.warning("Trouble generating URL for dataset '%s': %s" % (rel_path, ex))
return None
def get_store_usage_percent(self):
return 0.0
class SwiftObjectStore(S3ObjectStore):
"""
Object store that stores objects as items in a Swift bucket. A local
cache exists that is used as an intermediate location for files between
Galaxy and Swift.
"""
def _configure_connection(self):
log.debug("Configuring Swift Connection")
self.conn = boto.connect_s3(aws_access_key_id=self.access_key,
aws_secret_access_key=self.secret_key,
is_secure=self.is_secure,
host=self.host,
port=self.port,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
path=self.conn_path)
|
py_os_pipe_perf.py
|
"""
https://cardinalpeak.com/blog/inter-thread-communication-without-a-mutex/
"""
import gc
import os
import time
import ctypes
import threading
count = int(1e5)
def buffer_perf():
    """Time pushing count 8-byte packets from a writer thread to a reader thread over an os.pipe2() pair."""
gc.collect()
    flags = os.O_DIRECT | os.O_CLOEXEC  # O_DIRECT: packet (datagram) mode; O_CLOEXEC: close the fds on exec
reader_fd, writer_fd = os.pipe2(flags)
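    # With O_DIRECT the pipe runs in packet mode: each os.write()/os.read() moves one whole packet
    # atomically, so the two threads can exchange data without any extra locking.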
source = [index for index in range(count)]
target = [None for index in range(count)]
def buffer_reader_sync():
for index in range(count):
packet = os.read(reader_fd, 8)
addr = int.from_bytes(packet, 'little')
value = addr # TODO from addr
target[index] = value
def buffer_writer_sync():
for value in source:
addr = value # TODO into addr
packet = addr.to_bytes(8, byteorder='little')
os.write(writer_fd, packet)
thread_reader = threading.Thread(target=buffer_reader_sync, daemon=True)
thread_writer = threading.Thread(target=buffer_writer_sync, daemon=True)
time_start = time.time()
thread_reader.start()
thread_writer.start()
thread_reader.join()
thread_writer.join()
time_finish = time.time()
# assert source == target # TODO
return time_finish - time_start
def invoke_perf(session_size:int=3):
for session in range(session_size):
print(f"------ session={session} ------")
time_diff = buffer_perf()
time_unit = int(1e6 * time_diff / count)
print(f"time_unit: {time_unit}")
invoke_perf()
|
ScanNetwork.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__doc__="""
# Enter the first IP address in full (e.g. 10.32.0.1)
# then the last octet of the final address (e.g. 254)
# At the end, the IPs and MAC addresses found online are listed,
# along with the number of devices currently online on the network.
"""
import os
import subprocess
import time
import threading
class NetworkScanner(object):
ips_online=[]
threads=[]
def scannear_rede(self, ip_inicial, ip_final):
ip_base=subprocess.getoutput("echo %s 2> /dev/null | egrep -o \"([0-9]{1,3}\.){3}\"" % ip_inicial)
ip_inicial=int(subprocess.getoutput("echo %s 2> /dev/null | egrep -o \"([0-9]{1,3})$\"" % ip_inicial))
ip_final=int(ip_final)
while(ip_inicial <= ip_final):
ip=ip_base+str(ip_inicial)
            self.threads.append(threading.Thread(target=self.ping, args=(ip,)).start())  # start() returns None; the list is only used to count pings still running
ip_inicial += 1
def ping(self, ip):
time.sleep(0.2)
ping = os.system('ping -c 1 %s > /dev/null 2> /dev/null' % ip)
if(ping==0):
            mac_adress=subprocess.getoutput("arp -a %s 2> /dev/null | egrep -o \"([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}\"" % ip)
self.ips_online.append((ip,mac_adress,))
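        # self.threads acts as a counter of in-flight pings: wait until an entry exists, then pop one to mark this ping as finished.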
while(len(self.threads)==0):
time.sleep(0.5)
self.threads.pop()
        print("\n\n*****************\nTHREADS REMAINING >> %s\n*****************\n" % len(self.threads))
return
def main():
    ip_inicial = input("Enter the first IP address (in full, e.g. 10.32.0.1): ")
    ip_final = input("Enter the final IP (last octet only, e.g. 254): ")
scan = NetworkScanner()
scan.scannear_rede(ip_inicial, ip_final)
while(len(scan.threads)>0):
time.sleep(0.5)
#print("\n\n*****************\nTHREADS EXISTENTES >> %s\n*****************\n" % len(scan.threads))
scan.ips_online.sort()
for pc in scan.ips_online:
print("PC ONLINE >> IP=%s - MAC=%s" % (pc[0], pc[1]))
    print("\nThere are %s devices online right now\n\n" % len(scan.ips_online))
return 0
if __name__ == '__main__':
main()
|
test_tls.py
|
import datetime as dt
import itertools
import multiprocessing as mp
import pickle
import platform
import select
import socket
import sys
import time
import pytest
from mbedtls import hashlib
from mbedtls.exceptions import TLSError
from mbedtls.pk import RSA
from mbedtls.tls import *
from mbedtls.tls import _BaseConfiguration as BaseConfiguration
from mbedtls.tls import _DTLSCookie as DTLSCookie
from mbedtls.tls import _enable_debug_output
from mbedtls.tls import _PSKSToreProxy as PSKStoreProxy
from mbedtls.tls import _set_debug_level
from mbedtls.tls import TLSSession
from mbedtls.x509 import CRT, CSR, BasicConstraints
try:
from contextlib import suppress
except ImportError:
# Python 2.7
from contextlib2 import suppress
try:
FileNotFoundError
except NameError:
# Python 2.7
FileNotFoundError = OSError
class Client:
def __init__(self, cli_conf, proto, srv_address, srv_hostname):
super().__init__()
self.cli_conf = cli_conf
self.proto = proto
self.srv_address = srv_address
self.srv_hostname = srv_hostname
self._sock = None
def __enter__(self):
self.start()
return self
def __exit__(self, *exc_info):
self.stop()
def __del__(self):
self.stop()
@property
def context(self):
if self._sock is None:
return None
return self._sock.context
def do_handshake(self):
if not self._sock:
return
self._sock.do_handshake()
def echo(self, buffer, chunksize):
if not self._sock:
return
view = memoryview(buffer)
received = bytearray()
for idx in range(0, len(view), chunksize):
part = view[idx : idx + chunksize]
amt = self._sock.send(part)
received += self._sock.recv(2 << 13)
return received
def start(self):
if self._sock:
self.stop()
self._sock = ClientContext(self.cli_conf).wrap_socket(
socket.socket(socket.AF_INET, self.proto),
server_hostname=self.srv_hostname,
)
self._sock.connect(self.srv_address)
def stop(self):
if not self._sock:
return
with suppress(TLSError, OSError):
self._sock.close()
self._sock = None
def restart(self):
self.stop()
self.start()
class Server:
def __init__(self, srv_conf, proto, conn_q):
super().__init__()
self.srv_conf = srv_conf
self.proto = proto
self.conn_q = conn_q
self._sock = None
def __enter__(self):
self.start()
return self
def __exit__(self, *exc_info):
self.stop()
def __del__(self):
self.stop()
@property
def context(self):
if self._sock is None:
return None
return self._sock.context
def start(self):
if self._sock:
self.stop()
self._sock = ServerContext(self.srv_conf).wrap_socket(
socket.socket(socket.AF_INET, self.proto)
)
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._sock.bind(
("127.0.0.1" if platform.system() == "Windows" else "", 0)
)
if self.proto == socket.SOCK_STREAM:
self._sock.listen(1)
self.conn_q.put(self._sock.getsockname())
def stop(self):
if not self._sock:
return
with suppress(TLSError, OSError):
self._sock.close()
self._sock = None
def run(self, conn_handler):
with self:
{
TLSConfiguration: self._run_tls,
DTLSConfiguration: self._run_dtls,
}[type(self.srv_conf)](conn_handler)
def _run_tls(self, conn_handler):
assert self._sock
conn, addr = self._sock.accept()
try:
conn.do_handshake()
except TLSError:
conn.close()
return
try:
conn_handler(conn)
finally:
conn.close()
def _run_dtls(self, conn_handler):
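        # DTLS cookie exchange: the first handshake attempt is expected to fail with HelloVerifyRequest,
        # after which the client is re-accepted with the verify cookie set and the real handshake is run.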
assert self._sock
cli, addr = self._sock.accept()
cli.setcookieparam(addr[0].encode("ascii"))
with pytest.raises(HelloVerifyRequest):
cli.do_handshake()
_, (cli, addr) = cli, cli.accept()
_.close()
cli.setcookieparam(addr[0].encode("ascii"))
try:
cli.do_handshake()
except TLSError:
cli.close()
return
try:
conn_handler(cli)
finally:
cli.close()
class EchoHandler:
def __init__(self, stop_ev, packet_size=4096):
self.stop_ev = stop_ev
self.packet_size = packet_size
def __call__(self, conn):
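        # Echo back everything readable on the wrapped (D)TLS connection until the stop event is set.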
while not self.stop_ev.is_set():
readable, _, err = select.select([conn], [], [], 0.1)
if err:
break
for _ in readable:
# We use `send()` instead of `sendto()` for DTLS as well
# because the DTLS socket is connected.
received = conn.recv(self.packet_size)
sent = conn.send(received)
class TestPickle:
@pytest.fixture
def session(self):
return TLSSession()
@pytest.fixture(params=[TLSConfiguration, DTLSConfiguration])
def conf(self, request):
return request.param()
@pytest.fixture(params=[ClientContext, ServerContext])
def context(self, request, conf):
return request.param(conf)
@pytest.fixture
def identity(self):
return lambda obj: pickle.loads(pickle.dumps(obj))
@pytest.fixture
def tls_wrapped_buffer(self, context):
return TLSWrappedBuffer(context)
@pytest.fixture
def tls_wrapped_socket(self, tls_wrapped_buffer):
return TLSWrappedSocket(socket.socket(), tls_wrapped_buffer)
def test_session(self, session):
with pytest.raises(TypeError) as excinfo:
pickle.dumps(session)
assert str(excinfo.value).startswith("cannot pickle")
def test_configuration(self, conf, identity):
assert conf == identity(conf)
def test_context(self, context, identity):
with pytest.raises(TypeError) as excinfo:
pickle.dumps(context)
assert str(excinfo.value).startswith("cannot pickle")
def test_tls_wrapped_buffer(self, tls_wrapped_buffer):
with pytest.raises(TypeError) as excinfo:
pickle.dumps(tls_wrapped_buffer)
assert str(excinfo.value).startswith("cannot pickle")
def test_tls_wrapped_socket(self, tls_wrapped_socket):
# Python socket.socket is not pickable.
with pytest.raises(TypeError) as excinfo:
pickle.dumps(tls_wrapped_socket)
assert str(excinfo.value).startswith("cannot pickle")
class TestPSKStoreProxy:
@pytest.fixture
def psk_store(self):
return {"client": b"the secret key"}
@pytest.fixture
def proxy(self, psk_store):
return PSKStoreProxy(psk_store)
@pytest.mark.parametrize("repr_", (repr, str), ids=lambda f: f.__name__)
def test_repr(self, repr_, psk_store):
assert isinstance(repr_(psk_store), str)
def test_unwrap(self, proxy, psk_store):
assert proxy.unwrap() == psk_store
def test_eq(self, proxy, psk_store):
for k, v in psk_store.items():
assert proxy[k] == v
def test_len(self, proxy, psk_store):
assert len(proxy) == len(psk_store)
class TestTLSVersion:
@pytest.mark.parametrize("version", TLSVersion)
def test_major(self, version):
assert version.major() == 3
def test_minor(self):
# assert TLSVersion.SSLv3.minor() == 0
assert TLSVersion.TLSv1.minor() == 1
assert TLSVersion.TLSv1_1.minor() == 2
assert TLSVersion.TLSv1_2.minor() == 3
@pytest.mark.parametrize("version", TLSVersion)
def test_from_major_minor(self, version):
assert (
TLSVersion.from_major_minor(version.major(), version.minor())
is version
)
@pytest.mark.parametrize(
"version", [TLSVersion.MINIMUM_SUPPORTED, TLSVersion.MAXIMUM_SUPPORTED]
)
def test_minmax_supported(self, version):
assert version in TLSVersion
class TestDTLSVersion:
@pytest.mark.parametrize("version", DTLSVersion)
def test_major(self, version):
assert version.major() == 3
def test_minor(self):
assert DTLSVersion.DTLSv1_0.minor() == 2
assert DTLSVersion.DTLSv1_2.minor() == 3
@pytest.mark.parametrize("version", DTLSVersion)
def test_from_major_minor(self, version):
assert (
DTLSVersion.from_major_minor(version.major(), version.minor())
is version
)
@pytest.mark.parametrize(
"version",
[DTLSVersion.MINIMUM_SUPPORTED, DTLSVersion.MAXIMUM_SUPPORTED],
)
def test_minmax_supported(self, version):
assert version in DTLSVersion
class TestTLSRecordHeader:
@pytest.fixture(params=TLSRecordHeader.RecordType)
def record_type(self, request):
return request.param
@pytest.fixture(params=TLSVersion)
def version(self, request):
return request.param
@pytest.fixture
def length(self):
return 42
@pytest.fixture
def header(self, record_type, version, length):
return TLSRecordHeader(record_type, version, length)
@pytest.mark.parametrize("repr_", (repr, str), ids=lambda f: f.__name__)
def test_repr(self, repr_, record_type):
assert isinstance(repr_(record_type), str)
def test_hash(self, record_type):
assert isinstance(hash(record_type), int)
def test_accessors(self, header, record_type, version, length):
assert len(header) == 5
assert header.record_type is record_type
assert header.version is version
assert header.length == length
def test_serialization(self, header):
serialized = bytes(header)
assert isinstance(serialized, bytes)
assert len(serialized) == 5
assert TLSRecordHeader.from_bytes(serialized) == header
class TestTLSSession:
@pytest.fixture
def session(self):
return TLSSession()
def test_repr(self, session):
assert isinstance(repr(session), str)
class Chain:
@pytest.fixture(scope="class")
def now(self):
return dt.datetime.utcnow()
@pytest.fixture(scope="class")
def digestmod(self):
return hashlib.sha256
@pytest.fixture(scope="class")
def ca0_key(self):
ca0_key = RSA()
ca0_key.generate()
return ca0_key
@pytest.fixture(scope="class")
def ca1_key(self):
ca1_key = RSA()
ca1_key.generate()
return ca1_key
@pytest.fixture(scope="class")
def ee0_key(self):
ee0_key = RSA()
ee0_key.generate()
return ee0_key
@pytest.fixture(scope="class")
def ca0_crt(self, ca0_key, digestmod, now):
ca0_csr = CSR.new(ca0_key, "CN=Trusted CA", digestmod())
return CRT.selfsign(
ca0_csr,
ca0_key,
not_before=now,
not_after=now + dt.timedelta(days=90),
serial_number=0x123456,
basic_constraints=BasicConstraints(True, -1),
)
@pytest.fixture(scope="class")
def ca1_crt(self, ca1_key, ca0_crt, ca0_key, digestmod, now):
ca1_csr = CSR.new(ca1_key, "CN=Intermediate CA", digestmod())
return ca0_crt.sign(
ca1_csr,
ca0_key,
now,
now + dt.timedelta(days=90),
0x234567,
basic_constraints=BasicConstraints(True, -1),
)
@pytest.fixture(scope="class")
def ee0_crt(self, ee0_key, ca1_crt, ca1_key, digestmod, now):
ee0_csr = CSR.new(ee0_key, "CN=End Entity", digestmod())
return ca1_crt.sign(
ee0_csr, ca1_key, now, now + dt.timedelta(days=90), 0x345678
)
@pytest.fixture(scope="class")
def certificate_chain(self, ee0_crt, ca1_crt, ee0_key):
return (ee0_crt, ca1_crt), ee0_key
class TestTrustStore(Chain):
@pytest.fixture
def store(self):
return TrustStore.system()
@pytest.mark.parametrize("repr_", (repr, str), ids=lambda f: f.__name__)
def test_repr(self, repr_, store):
assert isinstance(repr_(store), str)
def test_eq(self, store):
other = TrustStore(store)
assert store is not other
assert store == other
def test_bool(self, store):
assert not TrustStore()
assert store
def test_len(self, store):
assert len(store) != 0
def test_iter(self, store):
assert store[0] != store[1]
for n, crt in enumerate(store, start=1):
assert crt in store
assert n == len(store)
def test_add_existing_certificate(self, store):
length = len(store)
store.add(store[0])
assert len(store) == length
def test_add_new_certificate(self, store, ca0_crt):
length = len(store)
store.add(ca0_crt)
assert len(store) == length + 1
class TestDTLSCookie:
@pytest.fixture
def cookie(self):
return DTLSCookie()
def test_generate_does_not_raise(self, cookie):
cookie.generate()
def test_timeout(self, cookie):
assert cookie.timeout == 60
cookie.timeout = 1000
assert cookie.timeout == 1000
class _BaseConfiguration(Chain):
@pytest.fixture
def conf(self):
raise NotImplementedError
@pytest.fixture
def version(self):
raise NotImplementedError
@pytest.mark.parametrize("repr_", (repr, str), ids=lambda f: f.__name__)
def test_repr(self, repr_, conf):
assert isinstance(repr_(conf), str)
@pytest.mark.parametrize("validate", [True, False])
def test_set_validate_certificates(self, conf, validate):
conf_ = conf.update(validate_certificates=validate)
assert conf_.validate_certificates is validate
@pytest.mark.parametrize("chain", [((), None), None])
def test_set_certificate_chain(self, conf, chain, certificate_chain):
if chain is None:
chain = certificate_chain
conf_ = conf.update(certificate_chain=chain)
assert conf_.certificate_chain == chain
@pytest.mark.parametrize("ciphers", (ciphers_available(),))
def test_set_ciphers(self, conf, ciphers):
conf_ = conf.update(ciphers=ciphers)
assert conf_.ciphers == ciphers
@pytest.mark.parametrize(
"inner_protocols",
[[], (), [NextProtocol.H2, NextProtocol.H2C], [b"h2", b"h2c", b"ftp"]],
)
def test_set_inner_protocols(self, conf, inner_protocols):
conf_ = conf.update(inner_protocols=inner_protocols)
assert conf_.inner_protocols == tuple(
NextProtocol(_) for _ in inner_protocols
)
def test_lowest_supported_version(self, conf, version):
conf_ = conf.update(lowest_supported_version=version)
assert conf_.lowest_supported_version is version
def test_highest_supported_version(self, conf, version):
conf_ = conf.update(highest_supported_version=version)
assert conf_.highest_supported_version is version
@pytest.mark.parametrize("store", [TrustStore.system()])
def test_trust_store(self, conf, store):
conf_ = conf.update(trust_store=store)
assert store
assert conf_.trust_store == store
@pytest.mark.parametrize("callback", [None])
def test_set_sni_callback(self, conf, callback):
assert conf.sni_callback is None
@pytest.mark.parametrize("psk", [None, ("client", b"the secret key")])
def test_psk(self, conf, psk):
assert conf.pre_shared_key is None
conf_ = conf.update(pre_shared_key=psk)
assert conf_.pre_shared_key == psk
@pytest.mark.parametrize(
"psk_store", [None, {"client": b"the secret key"}]
)
def test_psk_store(self, conf, psk_store):
assert conf.pre_shared_key_store is None
conf_ = conf.update(pre_shared_key_store=psk_store)
assert conf_.pre_shared_key_store == psk_store
class TestTLSConfiguration(_BaseConfiguration):
@pytest.fixture
def conf(self):
return TLSConfiguration()
@pytest.fixture(params=TLSVersion)
def version(self, request):
return request.param
class TestDTLSConfiguration(_BaseConfiguration):
@pytest.fixture
def conf(self):
return DTLSConfiguration()
@pytest.fixture(params=DTLSVersion)
def version(self, request):
return request.param
@pytest.mark.parametrize("anti_replay", [True, False])
def test_set_anti_replay(self, conf, anti_replay):
assert conf.anti_replay is True
conf_ = conf.update(anti_replay=anti_replay)
assert conf_.anti_replay is anti_replay
@pytest.mark.parametrize(
"hs_min, hs_max", [(1, 60), (42, 69), (4.2, 6.9), (42.0, 69.0)]
)
def test_handshake_timeout_minmax(self, conf, hs_min, hs_max):
assert conf.handshake_timeout_min == 1.0
assert conf.handshake_timeout_max == 60.0
conf_ = conf.update(
handshake_timeout_min=hs_min,
handshake_timeout_max=hs_max,
)
assert conf_.handshake_timeout_min == hs_min
assert conf_.handshake_timeout_max == hs_max
@pytest.mark.parametrize(
"hs_min, hs_max", [(None, None), (1, None), (None, 60)]
)
def test_handshake_timeout_default(self, conf, hs_min, hs_max):
conf_ = conf.update(
handshake_timeout_min=hs_min,
handshake_timeout_max=hs_max,
)
assert conf_.handshake_timeout_min == hs_min or 1.0
assert conf_.handshake_timeout_max == hs_max or 60.0
class TestBaseContext:
@pytest.fixture(params=[Purpose.SERVER_AUTH, Purpose.CLIENT_AUTH])
def purpose(self, request):
return request.param
@pytest.fixture(params=[TLSConfiguration, DTLSConfiguration])
def conf(self, request):
return request.param()
@pytest.fixture(params=[ServerContext, ClientContext])
def context(self, conf, request):
cls = request.param
return cls(conf)
def test_repr(self, context):
assert isinstance(repr(context), str)
def test_get_configuration(self, context, conf):
assert conf
assert context.configuration is conf
def test_selected_npn_protocol(self, context):
assert context._selected_npn_protocol() is None
def test_cipher(self, context):
assert context._cipher() is None
def test_get_channel_binding(self, context):
assert context._get_channel_binding() is None
# def test_negotiated_tls_version(self, context):
# assert context._negotiated_tls_version() is TLSVersion.SSLv3
@pytest.fixture
def tls_wrapped_buffer(self, context):
return TLSWrappedBuffer(context)
@pytest.mark.parametrize("repr_", (repr, str), ids=lambda f: f.__name__)
def test_repr_tls_wrapped_buffer(self, repr_, tls_wrapped_buffer):
assert isinstance(repr_(tls_wrapped_buffer), str)
class TestClientContext(TestBaseContext):
@pytest.fixture(params=[None, "hostname", "localhost"])
def hostname(self, request):
return request.param
@pytest.fixture
def context(self, conf, hostname):
return ClientContext(conf)
def test_context(self, context):
assert isinstance(context, ClientContext)
def test_hostname(self, context, hostname):
_ = context.wrap_buffers(hostname)
assert context._hostname == hostname
def test_wrap_buffers(self, context):
assert isinstance(context.wrap_buffers(None), TLSWrappedBuffer)
class TestServerContext(TestBaseContext):
@pytest.fixture
def context(self, conf):
return ServerContext(conf)
def test_context(self, context):
assert isinstance(context, ServerContext)
def test_wrap_buffers(self, context):
assert isinstance(context.wrap_buffers(), TLSWrappedBuffer)
PSK_AUTHENTICATION_CIPHERS = (
"TLS-ECDHE-PSK-WITH-AES-256-CBC-SHA",
"TLS-ECDHE-PSK-WITH-AES-128-CBC-SHA",
"TLS-DHE-PSK-WITH-AES-256-CBC-SHA",
"TLS-DHE-PSK-WITH-AES-128-CBC-SHA",
"TLS-RSA-PSK-WITH-AES-256-CBC-SHA",
"TLS-RSA-PSK-WITH-AES-128-CBC-SHA",
"TLS-PSK-WITH-AES-256-CBC-SHA",
"TLS-PSK-WITH-AES-128-CBC-SHA",
)
def generate_configs(*configs):
for conf, versions in configs:
for version in versions:
yield conf, version
class TestCommunication(Chain):
@pytest.fixture(
params=generate_configs(
(TLSConfiguration, TLSVersion), (DTLSConfiguration, DTLSVersion)
)
)
def configs(self, request):
return request.param
@pytest.fixture
def conf_cls(self, configs):
assert issubclass(configs[0], BaseConfiguration)
return configs[0]
@pytest.fixture
def version(self, configs):
assert isinstance(configs[1], (TLSVersion, DTLSVersion))
return configs[1]
@pytest.fixture
def version_min(self, conf_cls):
return {
TLSConfiguration: TLSVersion.MINIMUM_SUPPORTED,
DTLSConfiguration: DTLSVersion.MINIMUM_SUPPORTED,
}[conf_cls]
@pytest.fixture
def proto(self, conf_cls):
return {
TLSConfiguration: socket.SOCK_STREAM,
DTLSConfiguration: socket.SOCK_DGRAM,
}[conf_cls]
@pytest.fixture
def srv_conf(
self,
conf_cls,
version,
version_min,
trust_store,
certificate_chain,
srv_psk,
ciphers,
):
return conf_cls(
trust_store=trust_store,
certificate_chain=certificate_chain,
lowest_supported_version=version_min,
highest_supported_version=version,
ciphers=ciphers,
pre_shared_key_store=srv_psk,
validate_certificates=False,
)
@pytest.fixture
def cli_conf(
self, conf_cls, version, version_min, trust_store, cli_psk, ciphers
):
return conf_cls(
trust_store=trust_store,
lowest_supported_version=version_min,
highest_supported_version=version,
ciphers=ciphers,
pre_shared_key=cli_psk,
validate_certificates=True,
)
@pytest.fixture(params=[4])
def debug(self, srv_conf, cli_conf, request):
_enable_debug_output(srv_conf)
_enable_debug_output(cli_conf)
_set_debug_level(request.param)
@pytest.fixture(scope="class", params=[None])
def ciphers(self, request):
return request.param
@pytest.fixture(scope="class", params=["End Entity"])
def srv_hostname(self, request):
return request.param
@pytest.fixture(scope="class", params=[None])
def cli_psk(self, request):
return request.param
@pytest.fixture(scope="class", params=[None])
def srv_psk(self, request):
return request.param
@pytest.fixture(params=[False])
def buffer(self, request, randbytes):
return randbytes(5 * 16 * 1024)
@pytest.fixture(scope="class")
def trust_store(self, ca0_crt):
store = TrustStore()
store.add(ca0_crt)
return store
@pytest.fixture
def server(self, srv_conf, version, proto):
conn_q = mp.SimpleQueue()
stop_ev = mp.Event()
srv = Server(srv_conf, proto, conn_q)
runner = mp.Process(target=srv.run, args=(EchoHandler(stop_ev),))
runner.start()
yield conn_q.get()
stop_ev.set()
runner.join()
@pytest.fixture
def client(self, server, srv_hostname, cli_conf, proto):
return Client(cli_conf, proto, server, srv_hostname)
@pytest.mark.timeout(10)
@pytest.mark.usefixtures("server")
@pytest.mark.parametrize(
"srv_hostname", ["Wrong End Entity"], indirect=True
)
def test_host_name_verification_failure(self, client, srv_hostname):
with pytest.raises(TLSError), client:
client.do_handshake()
@pytest.mark.timeout(10)
@pytest.mark.usefixtures("server")
@pytest.mark.parametrize(
"ciphers", [PSK_AUTHENTICATION_CIPHERS], indirect=True
)
@pytest.mark.parametrize(
"srv_psk", [{"client": b"the secret key"}], indirect=True
)
@pytest.mark.parametrize(
"cli_psk", [("client", b"the secret key")], indirect=True
)
@pytest.mark.parametrize("chunksize", [1024])
def test_psk_authentication_success(self, client, buffer, chunksize):
with client:
client.do_handshake()
assert client.echo(buffer, chunksize) == buffer
@pytest.mark.timeout(10)
@pytest.mark.usefixtures("server")
@pytest.mark.parametrize(
"ciphers", [PSK_AUTHENTICATION_CIPHERS], indirect=True
)
@pytest.mark.parametrize(
"srv_psk",
[
{"client": b"another key"},
{"another client": b"the secret key"},
{"another client": b"another key"},
],
indirect=True,
)
@pytest.mark.parametrize(
"cli_psk", [("client", b"the secret key")], indirect=True
)
def test_psk_authentication_failure(self, client):
with pytest.raises(TLSError), client:
client.do_handshake()
@pytest.mark.timeout(10)
@pytest.mark.usefixtures("server")
@pytest.mark.parametrize("ciphers", (ciphers_available(),), indirect=True)
@pytest.mark.parametrize("chunksize", [1024])
def test_client_server(self, client, buffer, chunksize):
with client:
while True:
try:
client.do_handshake()
except (WantReadError, WantWriteError):
pass
except TLSError:
client.restart()
else:
break
assert client.echo(buffer, chunksize) == buffer
@pytest.mark.timeout(10)
@pytest.mark.usefixtures("server")
@pytest.mark.parametrize("ciphers", (ciphers_available(),), indirect=True)
def test_session_caching(self, client, cli_conf):
session = TLSSession()
with client:
while True:
try:
client.do_handshake()
except (WantReadError, WantWriteError):
pass
except (ConnectionError, TLSError):
client.restart()
else:
break
session.save(client.context)
new_context = session.resume(cli_conf)
assert isinstance(new_context, ClientContext)
assert new_context._verified
|
gerrit_util_test.py
|
#!/usr/bin/python3.8
#
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import http.server
import threading
from unittest import mock
from typing import List, Optional
import gerrit_util
# Implements logic to bring up a simple HTTP server that responds to a single JSON
# request and shuts down again.
class JsonResponder:
def __init__(self, response: bytes, code: int = 200):
self.response: bytes = response
self.code = code
self.got_request: bool = False
self.url: Optional[str] = None
# Start up a simple HTTP server running on its own thread.
self._server = http.server.HTTPServer(
('localhost', 0), self._make_handler())
self._server_thread = threading.Thread(
target=self._server.serve_forever, args=())
self._server_thread.daemon = True
self._server_thread.start()
self.port = self._server.server_port
def __del__(self):
self._server.shutdown()
self._server_thread.join()
def _make_handler(self):
# Give access to "self" in the new Handler class under the name "parent".
parent = self
        # Create a Handler class that, when instantiated, responds to POST
        # requests with the given response.
class Handler(http.server.BaseHTTPRequestHandler):
def do_POST(self) -> None:
self.send_response(parent.code)
self.send_header('Content-type', 'javascript/json')
self.end_headers()
self.wfile.write(b")]}'\n") # Write the JSON header.
self.wfile.write(parent.response)
parent.url = self.path
return Handler
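# Illustrative (not executed) usage of JsonResponder in a test:
#
#   responder = JsonResponder(b'{"status": "ok"}')
#   # ... issue an HTTP POST to http://localhost:<responder.port>/<path> ...
#   # The server replies with the XSSI prefix ")]}'" followed by the payload,
#   # and responder.url records the path that was requested.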
@mock.patch('gerrit_util.GERRIT_PROTOCOL', 'http')
class TestGerritUtil(unittest.TestCase):
# Test plumbing through GerritUtil to a HTTP server and back again.
def test_post_json(self) -> None:
responder = JsonResponder(b'{"labels": {"Commit-Queue": 2}}')
gerrit_util.SetReview(
'localhost:%d' % responder.port,
'12345',
labels={'Commit-Queue': 2},
notify=False)
# Ensure authentication errors are returned as GerritError exceptions.
def test_auth_error(self) -> None:
responder = JsonResponder(b'Authentication error', 400)
with self.assertRaises(gerrit_util.GerritError) as context:
gerrit_util.SetReview(
'localhost:%d' % responder.port,
'12345',
labels={'Commit-Queue': 2},
notify=False)
self.assertTrue(
'Try generating a new authentication password' in
context.exception.message)
if __name__ == '__main__':
unittest.main()
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
from absl.testing import parameterized
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top
except:
pass
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
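# Illustrative (not executed) usage of assert_ops_in_graph:
#
#   assert_ops_in_graph({"my_const": "Const", "my_add": "Add"}, g)
#
# This raises ValueError if either op name is missing from `g` or is present
# with a different op type; extra nodes in the graph are ignored.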
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return pywrap_tensorflow.IsBuiltWithROCm()
def GpuSupportsHalfMatMulAndConv():
return pywrap_tensorflow.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
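# Illustrative (not executed) example of the shape-array path of NHWCToNCHW:
#   NHWCToNCHW([10, 32, 32, 3]) -> [10, 3, 32, 32]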
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
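# Illustrative (not executed) example of the shape-array path of
# NHWCToNCHW_VECT_C: the channel dimension is split into groups of 4 and the
# group-of-4 axis becomes the innermost dimension:
#   NHWCToNCHW_VECT_C([2, 8, 8, 16]) -> [2, 4, 8, 8, 4]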
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
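# Illustrative (not executed) usage of skip_if; the condition may be a value or
# a callable evaluated at call time:
#
#   @skip_if(lambda: not is_gpu_available())
#   def testSomethingThatNeedsAGpu(self):
#     ...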
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
  Useful for checking that there are no missing Py_DECREFs in the C code
  exercised by a bit of Python.
"""
def decorator(self, *args, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various versions
# of python2.7.x.
for _ in range(2):
f(self, *args, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
      # In some cases (specifically on macOS), new_count is somehow
      # smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" %
(new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of refferents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
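# Illustrative (not executed) example of _combine_named_parameters:
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=[True, False])
# returns four OrderedDicts:
#   {"mode": "graph", "use_gpu": True},  {"mode": "graph", "use_gpu": False},
#   {"mode": "eager", "use_gpu": True},  {"mode": "eager", "use_gpu": False}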
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
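# Illustrative (not executed): for the combination {"mode": "graph",
# "use_gpu": True}, generate_combinations_with_testcase_name adds
# "testcase_name": "_test_mode_graph_usegpu_True" (non-alphanumeric characters
# are stripped from keys and values when building the name).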
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
  WARNING: This decorator can only be used in test cases that statically check
  the generated graph. Attempting to evaluate graph or function results via
  session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
        raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.get_default_graph()._building_function:
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if tf2.enabled():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
  This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
  Warning: if a non-GPU version of the package is installed, the function will
  also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow
  was built with CUDA support.
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
  Note that the keyword arg name "cuda_only" is misleading (the routine will
  return True when a GPU device is available, irrespective of whether TF was
  built with CUDA support or with ROCm support). However, the name is kept
  unchanged here because:
  ++ Changing the name "cuda_only" to something more generic would break
     backward compatibility.
  ++ Adding an equivalent "rocm_only" would require the implementation to check
     the build type. This in turn would require doing the same for CUDA, and
     thus potentially break backward compatibility.
  ++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
     but would require most (if not all) callers to update their calls to use
     "cuda_or_rocm_only" instead of "cuda_only".
Returns:
True if a GPU device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
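# Illustrative (not executed) use of is_gpu_available, e.g. to guard a test on
# a CUDA GPU with compute capability of at least 3.5:
#   if not is_gpu_available(cuda_only=True, min_cuda_compute_capability=(3, 5)):
#     self.skipTest("Needs a CUDA GPU with compute capability >= 3.5")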
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders we should be able to
call self.evaluate(), however this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evalaute `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
      # Note: disable the logging for OutOfRangeError, which makes the output
      # of tf.data tests hard to read, because OutOfRangeError is used as the
      # signal of completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def use_deterministic_cudnn(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CUDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = os.environ.get("TF_CUDNN_DETERMINISTIC", "")
os.environ["TF_CUDNN_DETERMINISTIC"] = "true"
result = f(self, *args, **kwargs)
os.environ["TF_CUDNN_DETERMINISTIC"] = original_var
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tensorflow.TF_GetXlaConstantFoldingDisabled()
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# The description is just for documentation purposes.
def disable_xla(description):
def disable_xla_impl(func):
"""Execute the test method only if xla is not enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_xla_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given classes test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
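# Illustrative (not executed) usage of for_all_test_methods, applying a
# method-level decorator (here disable_xla with a placeholder reason) to every
# test method of a class:
#
#   @for_all_test_methods(disable_xla, "not supported with XLA yet")
#   class MyNonXlaTest(test.TestCase):
#     ...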
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
def no_xla_auto_jit_impl(func):
"""This test is not intended to be run with XLA auto jit enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Skip test if using XLA is forced.
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return no_xla_auto_jit_impl
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tensorflow.TF_SetXlaAutoJitMode("2")
pywrap_tensorflow.TF_SetXlaMinClusterSize(1)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(False)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(True)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
context.context().summary_writer = None
    # Avoid calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that tests from different runs cannot pollute each
    other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
    in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another proto of the same type as `message`, parses the ascii form
into it, and then compares the two using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
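# Illustrative note (not part of the original source): the closeness test used
# above follows numpy's rule |a - b| <= atol + rtol * |b|, applied elementwise.
# For example, with the default rtol=1e-6 and atol=1e-6, comparing a=1.000001
# against b=1.0 gives |a - b| = 1e-6 and a tolerance of 1e-6 + 1e-6 * 1.0 = 2e-6,
# so the values are considered close; comparing a=1.00001 against b=1.0 gives
# |a - b| = 1e-5 > 2e-6, so the assertion would fail.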
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections.Mapping)
if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, then
# traverse through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested structure
of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested structure
of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
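# Illustrative sketch (a hypothetical example, not part of the original
# source): because the tolerances above are widened per dtype, a call such as
#
#   self.assertAllCloseAccordingToType(
#       np.array([1.0], dtype=np.float16),
#       np.array([1.0005], dtype=np.float16))
#
# would pass, since the float16 branch raises rtol/atol to 1e-3, whereas the
# same comparison on float64 inputs would keep the default 1e-6 tolerances
# and fail.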
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate lines. A line of ellipses (...) will be appended at the end if
the number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
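# Illustrative note (not part of the original source): np.where() returns one
# index array per dimension, so for a 2-D tensor the subscripts might look like
# (array([1, 1]), array([0, 1])). The np.transpose() above turns that into
# per-element coordinates [[1, 0], [1, 1]], and each line produced by this
# helper pairs one coordinate with its value, e.g. "  [1 0] : 3.5"
# (the value 3.5 is hypothetical).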
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
arrays = target if isinstance(target, list) else [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
assert graph.graph_def_versions.producer == producer_version
|
HiwinRA605_socket_ros_test_20190625193623.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and send them to the control computer over a socket
import socket
## Multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
point_data_flag = False
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # stop iteration (raising StopIteration inside a generator is an error under PEP 479 / Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
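# Illustrative sketch (not part of the original file): the switch class above
# emulates a C-style switch statement inside a for loop. A hypothetical usage
# looks like:
#
#   for case in switch(command):
#       if case(1):
#           ...  # handle command 1
#           break
#       if case(2, 3):
#           ...  # handle commands 2 and 3
#           break
#       if case():  # calling with no arguments acts as the default branch
#           ...
#
# Falling through without `break` sets self.fall, so later case() calls also
# match, mirroring C fall-through semantics.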
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------ server side -------
def point_data(req): ## Receive pose data sent by the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
point_data_flag = True
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## Receive arm mode data sent by the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## Receive speed mode data sent by the strategy side
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ## Receive gripper action data sent by the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## Create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------ server side end -------
##---------- socket packet transmission --------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
#start_input = int(input('Press 1 to start transmission, 3 to exit: ')) # start command input
start_input = 1
if start_input==1:
while 1:
##--------------- send arm commands over the socket -----------------
if Arm_feedback == 0:
#------- select mode --------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm rapid/safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 5 ## switch back to the initial mode state
s.send(data.encode('utf-8')) # send over the socket (the str must be encoded before transmission)
feedback_str = s.recv(1024)
# The arm side reports its state
if str(feedback_str[2]) == '70':# 'F': the arm is Ready and can accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# 'T': the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# '6': the strategy has finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
##--------------- send arm commands over the socket end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## Multithreading
def thread_test():
socket_client()
## Multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## switch to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # start the worker thread
socket_server()
t.join()
# Ctrl+K Ctrl+C Add line comment
# Ctrl+K Ctrl+U Remove line comment
# Ctrl+] / Ctrl+[ Indent/outdent line
|
crawler.py
|
#-*- coding: UTF-8 -*-
import socket
import struct
import math
import sys
import time
import traceback
import json
import conf
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from spiders.amz_product_crawl import AmazonSpider_01
# Set this machine's hostname; it is used to read the corresponding tasks from redis
AmazonSpider_01.hostname = conf.crawl_hostname
import multiprocessing
from multiprocessing import Process
from acache import l_pop
def crawl_process():
process = CrawlerProcess(get_project_settings())
process.crawl(AmazonSpider_01)
process.start()
if __name__ == '__main__':
'''
while True:
pid = Process(target=crawl_process, args=())
pid.start()
pid.join()
time.sleep(5)
'''
crawl_process()
|
train_faster_rcnn_alt_opt.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ZF")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(net_name):
# Faster R-CNN Alternating Optimization
n = 'faster_rcnn_alt_opt'
# Solver for each training stage
solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
[net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
[net_name, n, 'stage2_rpn_solver60k80k.pt'],
[net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations for each training stage
#max_iters = [80000, 40000, 80000, 40000]
max_iters = [500, 500, 500, 500]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
return solvers, max_iters, rpn_test_prototxt
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to Fast R-CNN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
print 'roidb len: {}'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
queue.put({'model_path': rpn_model_path})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None):
"""Use a trained RPN to generate proposals.
"""
cfg.TEST.RPN_PRE_NMS_TOP_N = -1 # no pre NMS filtering
cfg.TEST.RPN_POST_NMS_TOP_N = 2000 # limit top boxes after NMS
print 'RPN model: {}'.format(rpn_model_path)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Generate proposals on the imdb
rpn_proposals = imdb_proposals(rpn_net, imdb)
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + '_proposals.pkl')
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
queue.put({'proposal_path': rpn_proposals_path})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None):
"""Train a Fast R-CNN using proposals generated by an RPN.
"""
cfg.TRAIN.HAS_RPN = False # not generating proposals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 2
print 'Init model: {}'.format(init_model)
print 'RPN proposals: {}'.format(rpn_file)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train Fast R-CNN
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
fast_rcnn_model_path = model_paths[-1]
# Send Fast R-CNN model path over the multiprocessing queue
queue.put({'model_path': fast_rcnn_model_path})
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
# queue for communicated results between processes
mp_queue = mp.Queue()
# solves, iters, etc. for each training stage
solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[1],
max_iters=max_iters[1],
cfg=cfg,
rpn_file=rpn_stage1_out['proposal_path'])
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, init from stage 1 Fast R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(fast_rcnn_stage1_out['model_path']),
solver=solvers[2],
max_iters=max_iters[2],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rpn_stage2_out['model_path']),
solver=solvers[3],
max_iters=max_iters[3],
cfg=cfg,
rpn_file=rpn_stage2_out['proposal_path'])
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage2_out = mp_queue.get()
p.join()
# Create final model (just a copy of the last stage)
final_path = os.path.join(
os.path.dirname(fast_rcnn_stage2_out['model_path']),
args.net_name + '_faster_rcnn_final.caffemodel')
print 'cp {} -> {}'.format(
fast_rcnn_stage2_out['model_path'], final_path)
shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
print 'Final model: {}'.format(final_path)
|
tdvt.py
|
"""
Test driver script for the Tableau Datasource Verification Tool
"""
import sys
if sys.version_info[0] < 3:
raise EnvironmentError("TDVT requires Python 3 or greater.")
import argparse
import glob
import json
import pathlib
import queue
import shutil
import threading
import time
import zipfile
from pathlib import Path
from typing import List
from .config_gen.datasource_list import print_ds, print_configurations, print_logical_configurations
from .config_gen.tdvtconfig import TdvtInvocation
from .config_gen.test_config import TestSet, SingleLogicalTestSet, SingleExpressionTestSet, FileTestSet, TestConfig, RunTimeTestConfig
from .setup_env import create_test_environment, add_datasource
from .tabquery import *
from .tdvt_core import generate_files, run_diff, run_tests
from .version import __version__
# This contains the dictionary of configs you can run.
from .config_gen.datasource_list import WindowsRegistry, MacRegistry, LinuxRegistry
class TestOutputFiles(object):
output_actuals = 'tdvt_actuals_combined.zip'
output_tabquery_log = 'tabquery_logs.zip'
output_csv = "test_results_combined.csv"
output_json = "tdvt_output_combined.json"
all_output_files = [output_actuals, output_csv, output_json, output_tabquery_log]
@staticmethod
def copy_output_file(src_name, src_dir, dst, trim_header, append=True):
src = os.path.join(src_dir, src_name)
dst = os.path.join(os.getcwd(), dst)
logging.debug("Copying {0} to {1}".format(src, dst))
try:
dst_exists = os.path.isfile(dst)
src_file = open(src, 'r', encoding='utf8')
mode = 'w' if not dst_exists or not append else 'a'
dst_file = open(dst, mode, encoding='utf8')
line_count = 0
for line in src_file:
line_count += 1
if line_count == 1 and trim_header and dst_exists:
continue
dst_file.write(line)
src_file.close()
dst_file.close()
except IOError as e:
logging.debug("Exception while copying files: " + str(e))
return
def do_test_queue_work(i, q):
    """This will be called in a queue.join() context, so make sure to mark all work items as done and
    continue through the loop. Don't try to exit or return from here if there are still work items in the queue.
    See the Python queue documentation."""
    while True:
        # This blocks if the queue is empty.
        work = q.get()
        try:
            work.run()
        finally:
            # Mark the item done even if the test run raised, so queue.join() can complete.
            q.task_done()
class TestRunner(threading.Thread):
def __init__(self, test_set, test_config, lock, verbose, thread_id):
threading.Thread.__init__(self)
self.test_set = test_set
self.test_config = test_config
self.error_code = 0
self.thread_id = thread_id
self.verbose = verbose
self.thread_lock = lock
self.temp_dir = make_temp_dir([self.test_config.suite_name, str(thread_id)])
self.test_config.output_dir = self.temp_dir
def copy_files_to_zip(self, dst_file_name, src_dir, is_logs):
dst = os.path.join(os.getcwd(), dst_file_name)
mode = 'w' if not os.path.isfile(dst) else 'a'
optional_dir_name = self.test_config.config_file.replace('.', '_')
if is_logs is True:
log_dir = os.path.join(src_dir, optional_dir_name)
glob_path = glob.glob(os.path.join(log_dir, '*.txt'))
glob_path.extend(glob.glob(os.path.join(log_dir, '*.log')))
glob_path.extend(glob.glob(os.path.join(log_dir, 'crashdumps/*')))
else:
glob_path = glob.glob(os.path.join(src_dir, 'actual.*'))
with zipfile.ZipFile(dst, mode, zipfile.ZIP_DEFLATED) as myzip:
for actual in glob_path:
path = pathlib.PurePath(actual)
file_to_be_zipped = path.name
inner_output = os.path.join(optional_dir_name, file_to_be_zipped)
myzip.write(actual, inner_output)
def copy_output_files(self):
TestOutputFiles.copy_output_file("test_results.csv", self.temp_dir, TestOutputFiles.output_csv, True)
def copy_test_result_file(self):
src = os.path.join(self.temp_dir, "tdvt_output.json")
dst = os.path.join(os.getcwd(), TestOutputFiles.output_json)
try:
if not os.path.isfile(dst):
shutil.copyfile(src, dst)
else:
src_file = open(src, 'r', encoding='utf8')
results = json.load(src_file)
src_file.close()
dst_file = open(dst, 'r', encoding='utf8')
existing_results = json.load(dst_file)
dst_file.close()
existing_results['failed_tests'].extend(results['failed_tests'])
existing_results['successful_tests'].extend(results['successful_tests'])
# Check the newly succeeding tests, and if they are in the existing failed
# test list, remove them from the failed test list since they now succeed
for element in results['successful_tests']:
for failed in existing_results['failed_tests']:
if element['test_name'] == failed['test_name']:
existing_results['failed_tests'].remove(failed)
dst_file = open(dst, 'w', encoding='utf8')
json.dump(existing_results, dst_file)
dst_file.close()
except IOError:
return
def copy_files_and_cleanup(self):
left_temp_dir = False
try:
self.copy_files_to_zip(TestOutputFiles.output_actuals, self.temp_dir, is_logs=False)
self.copy_files_to_zip(TestOutputFiles.output_tabquery_log, self.temp_dir, is_logs=True)
self.copy_output_files()
self.copy_test_result_file()
except Exception as e:
print(e)
pass
try:
if not self.test_config.leave_temp_dir:
shutil.rmtree(self.temp_dir)
else:
left_temp_dir = True
except:
pass
return left_temp_dir
def run(self):
# Send output to null.
DEVNULL = open(os.devnull, 'wb')
output = DEVNULL if not self.verbose else None
logging.debug("\nRunning tdvt " + str(self.test_config) + " tdvt thread id: " + str(self.thread_id) + "\n")
print("Running {0} {1} {2}\n".format(self.test_config.suite_name, self.test_config.config_file,
str(self.thread_id)))
start_time = time.time()
self.test_config.thread_id = self.thread_id
failed_tests, total_tests = run_tests(self.test_config, self.test_set)
logging.debug("\nFinished tdvt " + str(self.test_config) + "\n")
print("\nFinished {0} {1} {2}\n".format(self.test_config.suite_name, self.test_config.config_file,
str(self.thread_id)))
self.failed_tests = failed_tests
self.total_tests = total_tests
def delete_output_files(root_dir):
    for f in TestOutputFiles.all_output_files:
        out_file = os.path.join(root_dir, f)
        for matched_file in glob.glob(out_file):
            if os.path.exists(matched_file):
                try:
                    os.unlink(matched_file)
                except Exception as e:
                    print(e)
                    continue
def get_datasource_registry(platform):
"""Get the datasources to run based on the suite parameter."""
if sys.platform.startswith("darwin"):
reg = MacRegistry()
elif sys.platform.startswith("linux"):
reg = LinuxRegistry()
else:
reg = WindowsRegistry()
return reg
def enqueue_single_test(args, ds_info: TestConfig, suite):
if not args.command == 'run-pattern' or not args.tds_pattern or (args.logical_pattern and args.expression_pattern):
return None, None
test_set = None
if args.logical_pattern:
test_set = SingleLogicalTestSet(suite, get_root_dir(), args.logical_pattern, args.tds_pattern,
args.test_pattern_exclude, ds_info)
else:
test_set = SingleExpressionTestSet(suite, get_root_dir(), args.expression_pattern, args.tds_pattern,
args.test_pattern_exclude, ds_info)
tdvt_invocation = TdvtInvocation(from_args=args, test_config=ds_info)
tdvt_invocation.tds = test_set.tds_name
tdvt_invocation.logical = test_set.is_logical
tdvt_invocation.config_file = test_set.config_name
return test_set, tdvt_invocation
def enqueue_failed_tests(run_file: Path, root_directory, args, rt: RunTimeTestConfig = None):
try:
with run_file.open('r', encoding='utf8') as file:
tests = json.load(file)
except:
logging.error("Error opening " + str(run_file))
return
delete_output_files(os.getcwd())
all_test_configs = {}
all_tdvt_test_configs = {}
all_test_pairs = []
failed_tests = tests['failed_tests']
# Go through the failed tests and group the ones that can be run together in a FileTestSet.
for f in failed_tests:
test_file_path = f['test_file']
test_root_dir = root_directory
tds_base = os.path.split(f['tds'])[1]
tds = get_tds_full_path(root_directory, tds_base)
logging.debug("Found failed test: " + test_file_path + " and tds " + tds)
tdvt_invocation = TdvtInvocation(from_json=f['test_config'])
if rt:
tdvt_invocation.set_run_time_test_config(rt)
tdvt_invocation.tds = tds
tdvt_invocation.leave_temp_dir = is_test(args) and args.noclean if args else False
suite_name = f['test_config']['suite_name']
password_file = f['password_file'] if 'password_file' in f else ''
        # Use a hash of the test file path to distinguish unique test runs (since the config only supports one test path).
        # Otherwise two tests with the same name could show up and the first result file would overwrite the second.
tt = "L" if tdvt_invocation.logical else "E"
test_set_unique_id = hashlib.sha224(
(os.path.split(test_file_path)[0] + "_" + tds_base + "_" + tt).replace("-", "_").encode())
test_set_unique_id = test_set_unique_id.hexdigest()
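        # For example (illustrative paths only, not taken from a real run): a failed test under
        # 'exprtests/standard' run against 'cast_calcs.sqlserver.tds' as an expression test hashes
        # to one stable hex id, so all such failures share a single FileTestSet and temp output dir.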
test_set_config = None
if not suite_name in all_test_configs:
all_test_configs[suite_name] = {}
if not test_set_unique_id in all_test_configs[suite_name]:
tdvt_invocation.output_dir = make_temp_dir([test_set_unique_id])
all_tdvt_test_configs[test_set_unique_id] = tdvt_invocation
run_time_config = RunTimeTestConfig(60*60, 1)
test_set_config = TestConfig(suite_name, '', run_time_config)
all_test_configs[suite_name][test_set_unique_id] = test_set_config
else:
test_set_config = all_test_configs[suite_name][test_set_unique_id]
current_test_set = None
if tdvt_invocation.logical:
current_test_set = test_set_config.get_logical_tests(test_set_unique_id)
else:
current_test_set = test_set_config.get_expression_tests(test_set_unique_id)
if current_test_set and len(current_test_set) == 1:
current_test_set = current_test_set[0]
if not current_test_set:
current_test_set = FileTestSet(suite_name, test_root_dir, test_set_unique_id, tds, tdvt_invocation.logical, suite_name,
password_file)
if tdvt_invocation.logical:
test_set_config.add_logical_testset(current_test_set)
else:
test_set_config.add_expression_testset(current_test_set)
current_test_set.append_test_file(test_file_path)
for suite_names in all_test_configs:
for test_set_id in all_test_configs[suite_names]:
test_set_config = all_test_configs[suite_names][test_set_id]
for each_test_set in test_set_config.get_logical_tests() + test_set_config.get_expression_tests():
tdvt_invocation = all_tdvt_test_configs[test_set_id]
all_test_pairs.append((each_test_set, tdvt_invocation))
logging.debug("Queing up tests: " + str(tdvt_invocation))
return all_test_pairs
def enqueue_tests(ds_info, args, suite):
tests = []
test_set_configs = []
if not is_test(args):
return test_set_configs
logging.debug("Enqueing tests for " + ds_info.dsname)
if args.logical_only or args.expression_only:
if args.logical_only:
tests.extend(ds_info.get_logical_tests(args.logical_only))
if args.expression_only:
tests.extend(ds_info.get_expression_tests(args.expression_only))
else:
tests.extend(ds_info.get_logical_tests(args.logical_only))
tests.extend(ds_info.get_expression_tests(args.expression_only))
# Make sure there are tests.
if not tests:
logging.error("No tests found")
return test_set_configs
for x in tests:
if not x.generate_test_file_list_from_config():
logging.error("No tests found for config " + str(x))
return test_set_configs
for test_set in tests:
tdvt_invocation = TdvtInvocation(from_args=args, test_config = ds_info)
tdvt_invocation.logical = test_set.is_logical_test()
tdvt_invocation.tds = test_set.tds_name
tdvt_invocation.config_file = test_set.config_name
test_set_configs.append((test_set, tdvt_invocation))
return test_set_configs
def get_level_of_parallelization(args):
# This indicates how many database/test suite combinations to run at once
max_threads = 6
if is_test(args) and args.thread_count:
max_threads = args.thread_count
max_threads = get_max_process_level_of_parallelization(max_threads)
print("Setting tdvt thread count to: " + str(max_threads))
return max_threads
list_usage_text = '''
Show all test suites
--ds
See what a test suite consists of
--ds sqlserver
--ds standard
Show logical configs:
--logical_config
'''
run_usage_text = '''
The 'run' argument can take a single datasource, a list of data sources, or a test suite name in any combination.
run postgres_odbc,postgres_jdbc
The 'run' argument can also take the --verify flag to run a connection test against tests with SmokeTest = True set.
run postgres_odbc --verify
Both logical and expression tests are run by default.
Run all expression tests
run postgres_odbc -e
Run all logical tests
run postgres_odbc -q
There are multiple suites of expression tests, for example, standard and LOD (level of detail). The config files that drive the tests
are named expression_test.sqlserver.cfg and expression.lod.sqlserver.cfg.
To run just one of those try entering part of the config name as an argument:
run postgres_odbc -e lod
'''
run_pattern_usage_text = '''
Run one expression test against many datasources
run-pattern postgres_odbc --exp exprtests/standard/setup.date.datepart.second*.txt --tdp cast_calcs.*.tds
Run one logical query test against many datasources
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.B1713.?.xml --tdp cast_calcs.*.tds
The 'exp' argument is a glob pattern that is used to find the test file using the relative test path.
The 'test-ex' argument can be used to exclude test files. This is a regular expression pattern.
The tds pattern is used to find the tds. Use a '*' character where the tds name will be substituted,
i.e. cast_calcs.*.tds
This can be combined with * to run an arbitrary set of 'correct' logical query tests against a datasource
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.*.?.xml --tdp cast_calcs.*.tds
Alternatively
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.*.dbo.xml --tdp cast_calcs.*.tds
To skip test 59740:
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.*.dbo.xml --tdp cast_calcs.*.tds --test-ex 59740
'''
action_usage_text = '''
'''
run_file_usage_text = '''
'''
def create_parser():
parser = argparse.ArgumentParser(description='TDVT - Tableau Datasource Verification Tool.')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='Verbose output.', required=False)
#Common run test options.
run_test_common_parser = argparse.ArgumentParser(description='Common test run options.', add_help=False)
run_test_common_parser.add_argument('--threads', '-t', dest='thread_count', type=int, help='Max number of threads to use.', required=False)
run_test_common_parser.add_argument('--no-clean', dest='noclean', action='store_true', help='Leave temp dirs.', required=False)
run_test_common_parser.add_argument('--generate', dest='generate', action='store_true', help='Generate logical query test files.', required=False)
run_test_common_parser.add_argument('--compare-sql', dest='compare_sql', action='store_true', help='Compare SQL.', required=False)
run_test_common_parser.add_argument('--nocompare-tuples', dest='nocompare_tuples', action='store_true', help='Do not compare Tuples.', required=False)
subparsers = parser.add_subparsers(help='commands', dest='command')
#Get information.
list_parser = subparsers.add_parser('list', help='List information about tests and configurations.', usage=list_usage_text)
list_group = list_parser.add_mutually_exclusive_group(required=True)
list_group.add_argument('--ds', dest='list_ds', help='List datasource config.', required=False, default=None, const='', nargs='?')
list_group.add_argument('--logical_config', dest='list_logical_configs', help='List available logical configs.', required=False, default=None, const='', nargs='?')
#Actions.
action_group = subparsers.add_parser('action', help='Various non-test actions.', usage=action_usage_text)
action_group.add_argument('--setup', dest='setup', action='store_true', help='Create setup directory structure.', required=False)
action_group.add_argument('--add_ds', dest='add_ds', help='Add a new datasource.', required=False)
action_group.add_argument('--diff-test', '-dd', dest='diff', help='Diff the results of the given test (ie exprtests/standard/setup.calcs_data.txt) against the expected files. Can be used with the sql and tuple options.', required=False)
action_group.add_argument('--generate', dest='action_generate', action='store_true', help='Generate logical query test files.', required=False)
#Run tests.
run_test_parser = subparsers.add_parser('run', help='Run tests.', parents=[run_test_common_parser], usage=run_usage_text)
run_test_parser.add_argument('ds', help='Comma separated list of Datasource names or groups to test. See the \'list\' command.', nargs='+')
run_test_parser.add_argument('--verify', dest='smoke_test', action='store_true', help='Verifies the connection to a data source against tests in your .ini file with SmokeTest = True.', required=False) # noqa: E501
run_test_parser.add_argument('--logical', '-q', dest='logical_only', help='Only run logical tests whose config file name matches the supplied string, or all if blank.', required=False, default=None, const='*', nargs='?')
    run_test_parser.add_argument('--expression', '-e', dest='expression_only', help='Only run expression tests whose config file name matches the supplied string, or all if blank.', required=False, default=None, const='*', nargs='?')
#Run test pattern.
run_test_pattern_parser = subparsers.add_parser('run-pattern', help='Run individual tests using a pattern.', parents=[run_test_common_parser], usage=run_pattern_usage_text)
run_test_pattern_parser.add_argument('ds', help='Comma separated list of Datasource names or groups to test. See the \'list\' command.', nargs='+')
run_test_group = run_test_pattern_parser.add_mutually_exclusive_group(required=True)
run_test_group.add_argument('--exp', dest='expression_pattern', help='Only run expression tests whose name and path matches the supplied string. This is a glob pattern. Also you must set the tds-pattern to use when running the test.', required=False, default=None, const='', nargs='?')
run_test_group.add_argument('--logp', dest='logical_pattern', help='Only run logical tests whose name and path matches the supplied string. this is a glob pattern. Also you must set the tds-pattern to use when running the test. Use a ? to replace the logical query config component of the test name.', required=False, default=None, const='', nargs='?')
run_test_pattern_parser.add_argument('--tdp', dest='tds_pattern', help='The datasource tds pattern to use when running the test. See exp and logp arguments.', required=True, default=None, const='', nargs='?')
run_test_pattern_parser.add_argument('--test-ex', dest='test_pattern_exclude', help='Exclude tests whose name matches the supplied string. This is a regular expression pattern. Can be used with exp and logp arguments. Also set the tds-pattern to use when running the test.', required=False, default=None, const='', nargs='?')
#Run file.
run_file_parser = subparsers.add_parser('run-file', help='Run tests from a file.', parents=[run_test_common_parser], usage=run_file_usage_text)
run_file_parser.add_argument('run_file', help='Json file containing failed tests to run.')
return parser
def init():
parser = create_parser()
args = parser.parse_args()
# Create logger.
logging.basicConfig(filename='tdvt_log_combined.txt', level=logging.DEBUG, filemode='w',
format='%(asctime)s %(message)s')
logger = logging.getLogger()
ch = logging.StreamHandler()
if 'verbose' in args and args.verbose:
# Log to console also.
ch.setLevel(logging.DEBUG)
else:
args.verbose = False
ch.setLevel(logging.WARNING)
logger.addHandler(ch)
logging.debug('TDVT version: ' + str(__version__))
logging.debug('TDVT Arguments: ' + str(args))
ds_reg = get_datasource_registry(sys.platform)
configure_tabquery_path()
return parser, ds_reg, args
def is_test(args):
return args.command in ['run', 'run-pattern', 'run-file']
def active_thread_count(threads):
active = 0
for t in threads:
if t.is_alive():
active += 1
return active
def test_runner(all_tests, test_queue, max_threads):
for i in range(0, max_threads):
worker = threading.Thread(target=do_test_queue_work, args=(i, test_queue))
        worker.daemon = True
worker.start()
test_queue.join()
failed_tests = 0
total_tests = 0
for work in all_tests:
if work.copy_files_and_cleanup():
print("Left temp dir: " + work.temp_dir)
failed_tests += work.failed_tests if work.failed_tests else 0
total_tests += work.total_tests if work.total_tests else 0
return failed_tests, total_tests
def run_tests_impl(tests: List[TestSet], max_threads, args):
if not tests:
print("No tests found. Check arguments.")
sys.exit()
smoke_test_queue = queue.Queue()
smoke_tests = []
test_queue = queue.Queue()
all_work = []
lock = threading.Lock()
for test_set, test_config in tests:
runner = TestRunner(test_set, test_config, lock, args.verbose, len(all_work) + 1)
        if test_set.smoke_test and test_set.test_is_enabled is not False:
smoke_tests.append(runner)
smoke_test_queue.put(runner)
else:
all_work.append(runner)
logging.debug("smoke test queue size is: " + str(len(smoke_tests)))
logging.debug("test queue size is: " + str(len(all_work)))
require_smoke_test = args.command == 'run' and args.smoke_test
if not smoke_tests:
logging.warning("No smoke tests detected.")
if require_smoke_test:
sys.exit(1)
else:
logging.warning("Tests will run without verifying the data source connection.")
if not all_work and not smoke_tests:
print("No tests found. Check arguments.")
sys.exit()
failing_ds = set()
failed_smoke_tests = 0
total_smoke_tests = 0
if smoke_tests:
smoke_test_threads = min(len(smoke_tests), max_threads)
print("Starting smoke tests. Creating", str(smoke_test_threads), "worker threads.")
failed_smoke_tests, total_smoke_tests = test_runner(smoke_tests, smoke_test_queue, smoke_test_threads)
print("{} smoke test(s) ran.".format(total_smoke_tests))
if failed_smoke_tests > 0:
failing_ds = set(item.test_set.ds_name for item in smoke_tests if item.failed_tests > 0)
print("{} smoke test(s) failed. Please check logs for information.".format(failed_smoke_tests))
if require_smoke_test:
print("Smoke tests failed, exiting.")
sys.exit(1)
if require_smoke_test:
sys.exit(0)
if failing_ds:
print("Tests for the following data source(s) will not be run: {}".format(', '.join(failing_ds)))
final_work = []
for item in all_work:
if item.test_set.ds_name in failing_ds:
item.test_set.test_is_skipped = True
final_work.append(item)
test_queue.put(item)
print("\nStarting tests. Creating " + str(max_threads) + " worker threads.")
start_time = time.time()
failed_tests, total_tests = test_runner(final_work, test_queue, max_threads)
failed_tests += failed_smoke_tests
total_tests += total_smoke_tests
print('\n')
print("Total time: " + str(time.time() - start_time))
print("Total failed tests: " + str(failed_tests))
print("Total tests ran: " + str(total_tests))
return failed_tests, total_tests
def get_ds_list(ds):
if not ds:
return []
ds_list = ds[0].split(',')
ds_list = [x.strip() for x in ds_list]
return ds_list
def run_desired_tests(args, ds_registry):
generate_files(ds_registry, False)
ds_to_run = ds_registry.get_datasources(get_ds_list(args.ds))
if not ds_to_run:
sys.exit(0)
if len(ds_to_run) > 0:
delete_output_files(os.getcwd())
if not tabquerycli_exists():
print("Could not find Tabquerycli.")
sys.exit(0)
max_threads = get_level_of_parallelization(args)
test_sets: List[TestSet] = []
for ds in ds_to_run:
ds_info = ds_registry.get_datasource_info(ds)
if not ds_info:
continue
print("Testing " + ds)
        max_threads_per_datasource = ds_info.run_time_config.maxthread
        # If multiple datasources are being run, the per-datasource thread setting cannot apply.
if max_threads_per_datasource > 0:
print("thread setting in " + ds + ".ini = " + str(max_threads_per_datasource))
if len(ds_to_run) == 1:
max_threads = max_threads_per_datasource
else:
print("Setting cannot apply since you are running multiple datasources.")
suite = ds
single_test, single_test_config = enqueue_single_test(args, ds_info, suite)
if single_test:
test_sets.extend([(single_test, single_test_config)])
else:
test_sets.extend(enqueue_tests(ds_info, args, suite))
failed_tests, total_tests = run_tests_impl(test_sets, max_threads, args)
return failed_tests
def run_file(run_file: Path, output_dir: Path, threads: int, args) -> int:
"""Rerun all the failed tests listed in the json file."""
logging.debug("Running failed tests from : " + str(run_file))
# See if we need to generate test setup files.
root_directory = get_root_dir()
failed_tests, total_tests = run_tests_impl(enqueue_failed_tests(run_file, root_directory, args), threads, args)
# This can be a retry-step.
return 0
def run_generate(ds_registry):
start_time = time.time()
generate_files(ds_registry, True)
end_time = time.time() - start_time
print("Done: " + str(end_time))
def main():
parser, ds_registry, args = init()
if args.command == 'action':
if args.setup:
print("Creating setup files...")
create_test_environment()
sys.exit(0)
elif args.add_ds:
add_datasource(args.add_ds, ds_registry)
generate_files(ds_registry, True)
sys.exit(0)
elif args.action_generate:
run_generate(ds_registry)
sys.exit(0)
elif is_test(args):
if args.generate:
run_generate(ds_registry)
# It's ok to call generate and then run some tests, so don't exit here.
if args.command == 'run-file':
output_dir = os.getcwd()
max_threads = get_level_of_parallelization(args)
sys.exit(run_file(Path(args.run_file), Path(output_dir), max_threads, args))
error_code = run_desired_tests(args, ds_registry)
sys.exit(error_code)
elif args.command == 'action' and args.diff:
tdvt_invocation = TdvtInvocation(from_args=args)
run_diff(tdvt_invocation, args.diff)
sys.exit(0)
elif args.command == 'list' and args.list_logical_configs is not None:
print_logical_configurations(ds_registry, args.list_logical_configs)
sys.exit(0)
elif args.command == 'list' and args.list_ds is not None:
print_configurations(ds_registry, [args.list_ds], args.verbose)
sys.exit(0)
logging.error("Could not interpret arguments. Nothing done.")
parser.print_help()
sys.exit(-1)
if __name__ == '__main__':
main()
|
test_drop_collection.py
|
import pdb
import pytest
import logging
import itertools
import time
from time import sleep
import threading
from multiprocessing import Process
from utils import *
from constants import *
uniq_id = "drop_collection"
class TestDropCollection:
"""
******************************************************************
The following cases are used to test `drop_collection` function
******************************************************************
"""
def test_drop_collection(self, connect, collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
connect.drop_collection(collection)
time.sleep(2)
assert not connect.has_collection(collection)
def test_drop_collection_without_connection(self, collection, dis_connect):
'''
        target: test drop collection without connection
method: drop collection with correct params, with a disconnected instance
expected: drop raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.drop_collection(collection)
def test_drop_collection_not_existed(self, connect):
'''
        target: test dropping a collection that was never created
        method: generate a random collection name that does not exist in the db,
            assert the exception raised by the drop_collection method
        expected: exception raised
'''
collection_name = gen_unique_str(uniq_id)
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
@pytest.mark.level(2)
def test_create_drop_collection_multithread(self, connect):
'''
target: test create and drop collection with multithread
method: create and drop collection using multithread,
expected: collections are created, and dropped
'''
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uniq_id)
collection_names.append(collection_name)
connect.create_collection(collection_name, default_fields)
connect.drop_collection(collection_name)
for i in range(threads_num):
t = TestThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert not connect.has_collection(item)
class TestDropCollectionInvalid(object):
"""
Test has collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_drop_collection_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
def test_drop_collection_with_empty_collectionname(self, connect):
collection_name = ''
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
def test_drop_collection_with_none_collectionname(self, connect):
collection_name = None
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
|
RetinaDetector.py
|
# -*- coding:utf-8 -*-
# @author : cbingcan
# @time : 2021/8/24/024 15:29
import cv2
import os
import time
import numpy as np
from threading import Thread
import queue
from PIL import Image, ImageDraw
import sophon.sail as sail
import threading
from detection.rcnn.processing.bbox_transform import clip_boxes
from detection.rcnn.processing.generate_anchor import generate_anchors_fpn, anchors_plane
from detection.rcnn.processing.nms import gpu_nms_wrapper, cpu_nms_wrapper
from utils import *
class RetinaDetector():
"""
    Reads 4 images at a time from the detect queue; waits until 4 are available.
"""
def __init__(self, engine, detect_queue, idx, queue_mutex, result_queue, result_mutex, predict_queue=None):
# def __init__(self, engine, detect_queue, idx, queue_mutex, predict_queue=None):
super().__init__()
print(idx,'RetinaDetector init')
        self.engine = engine  # detection engine
self.handle = self.engine.get_handle()
self.net_out_queue = queue.Queue()
self.bmcv = sail.Bmcv(self.handle)
self.detect_queue = detect_queue
self.queue_mutex = queue_mutex
self.result_queue = result_queue
self.result_mutex = result_mutex
self.predict_queue = predict_queue
self.graph_name = self.engine.get_graph_names()[0]
self.input_name = self.engine.get_input_names(self.graph_name)[0]
self.input_shape = self.engine.get_input_shape(self.graph_name, self.input_name)
self.input_w = int(self.input_shape[-1])
self.input_h = int(self.input_shape[-2])
self.idx = idx
self.anchors_list = {}
self.gen_anchors()
self.image_idx = 0
self.batch_idx = 0
print(idx,'RetinaDetector init end')
def calc_im_scale_split(self, w, h):
scales_w = float(w)/float(self.input_w)
scales_h = float(h)/float(self.input_h)
return scales_w,scales_h
def post_process_run(self):
while True:
boxs_list = [[],[],[],[]]
net_out = self.net_out_queue.get()
if net_out is not None:
#det_list = self.parse_net_out(net_out)
detect_out_data = net_out['detection_out'][0][0]
order = np.where(([int(i[1]) in [1] for i in detect_out_data[:]]))
detect_out_data = detect_out_data[order]
#print('[post_process_run] {}'.format(detect_out_data.shape))
for out_data in detect_out_data:
if out_data[2] < 0.6:
continue
if int(out_data[1]) != 1:
continue
batch_idx = int(out_data[0])
im_scale_h = 6
im_scale_w = 6
pad_x = 0
pad_y = 0
x1 = (out_data[3] * self.input_w - pad_x) * im_scale_w
y1 = (out_data[4] * self.input_h - pad_y) * im_scale_h
x2 = (out_data[5] * self.input_w - pad_x) * im_scale_w
y2 = (out_data[6] * self.input_h - pad_y) * im_scale_h
score = out_data[2]
boxs_list[batch_idx].append([x1,y1,x2,y2,score])
#print('[post_process_run] get box {}'.format(boxs_list))
def cssd_post_process(self, net_out):
boxs_list = [[], [], [], []]
detect_out_data = net_out['detection_out'][0][0]
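        # Each row of the 'detection_out' blob is assumed to follow the usual SSD layout
        # [batch_idx, class_id, score, x1, y1, x2, y2], with box coordinates normalized to the
        # network input size (hence the input_w/input_h scaling below).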
order = np.where(([int(i[1]) in [1] for i in detect_out_data[:]]))
detect_out_data = detect_out_data[order]
# print('[post_process_run] {}'.format(detect_out_data.shape))
for out_data in detect_out_data:
if out_data[2] < 0.92:
continue
#if int(out_data[1]) != 1:
# continue
batch_idx = int(out_data[0])
im_scale_h = 6
im_scale_w = 6
pad_x = 0
pad_y = 0
x1 = (out_data[3] * self.input_w - pad_x) * im_scale_w
y1 = (out_data[4] * self.input_h - pad_y) * im_scale_h
x2 = (out_data[5] * self.input_w - pad_x) * im_scale_w
y2 = (out_data[6] * self.input_h - pad_y) * im_scale_h
score = out_data[2]
boxs_list[batch_idx].append([x1, y1, x2, y2, score])
return boxs_list
def postprocess(self, im_tensors, output_tensors):
"""
        Post-process: run inference on the batched input and decode the detection outputs.
"""
boxs_list = []
try:
#t = time.time()
# print("推理开始:shape = " , im_tensors.shape)
self.engine.process(self.graph_name, {self.input_name: im_tensors}, output_tensors)
net_out = {}
#t1 = time.time()
#print('[postprocess] process time {}'.format((t1 - t) * 1000))
for output_name, output_tensor in output_tensors.items():
output_scale = 1.0#self.engine.get_output_scale(self.graph_name, output_name)
out_net = output_tensor.scale_to(output_scale)
net_out[output_name] = out_net
t2 = time.time()
det_list = []
det_list = self.cssd_post_process(net_out)
self.batch_idx += 1
for det_box, bmimage, image_id, frame_number in zip(det_list, self.ost_frame_list,self.img_id_list,self.frame_number):
self.image_idx += 1
scale_w,scale_h = self.calc_im_scale_split(bmimage.width(), bmimage.height())
for idx_temp in range(len(det_box)):
det_box[idx_temp][0]*=scale_w
det_box[idx_temp][1]*=scale_h
det_box[idx_temp][2]*=scale_w
det_box[idx_temp][3]*=scale_h
# if len(det_box) > 0:
# for idx_temp, box in enumerate(det_box):
# self.bmcv.rectangle(bmimage, \
# int(box[0]/6), int(box[1]/6), \
# int((box[2]-box[0])/6), int((box[3]-box[1])/6), (255, 0, 0), 3)
# self.bmcv.imwrite('/data/video/save_result/{}_result_{}.jpg'.format(image_id,t2), bmimage)
self.result_mutex.acquire()
if len(self.result_queue) > 40:
print("Result Queue Length more than 40")
self.result_queue.pop(0)
self.result_queue.append({"id": image_id, "frame": bmimage, 'detection': det_box, "frame_number": frame_number})
self.result_mutex.release()
except Exception as e:
# print("erro: {}".format(e.errno))
print('error in postprocess:', e)
pass
def run(self, idx, num):
#try:
print(idx,'wxc_run_start')
self.postprocess_t = Thread(target=self.post_process_run, args=())
self.postprocess_t.start()
input_shape = [4,3,self.input_h,self.input_w]
input_dtype = self.engine.get_input_dtype(self.graph_name, self.input_name)
input_scale = self.engine.get_input_scale(self.graph_name, self.input_name)
img_dtype = self.bmcv.get_bm_image_data_format(input_dtype)
output_name = self.engine.get_output_names(self.graph_name)
#print('[Retina] output_tensors {}'.format(output_tensors))
self.im_scale_list = []
#scale = self.engine.get_input_scale(graph_name, input_name)
scale = 1.0
#ab = [x * scale for x in [1, 103.939, 1, 116.779, 1, 123.68]]
ab = [x * input_scale for x in [1, 0, 1, 0, 1, 0]]
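        # convert_to is assumed to apply a per-channel linear map (alpha * pixel + beta); here each
        # channel uses alpha = input_scale and beta = 0, i.e. no mean subtraction, while the
        # commented-out line above shows the per-channel mean variant.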
#print('scale:', scale, 'ab:', ab)
use_local_img=False
input = sail.Tensor(self.handle, input_shape, input_dtype, False, False)
while True:
#try:
self.img_list = []
self.ost_frame_list = []
self.img_id_list = []
self.frame_number = []
i = 0
tmp_img = sail.BMImageArray4D()
output = sail.BMImageArray4D(self.handle, input_shape[2], input_shape[3], \
sail.Format.FORMAT_BGR_PLANAR, img_dtype)
while True:
if len(self.detect_queue) == 0:
time.sleep(0.02)
continue
if use_local_img:
im_tensor = read_img_as_array('100.jpg', (800,450))
else:
self.queue_mutex.acquire()
data = self.detect_queue.pop(0)
im_tensor = data["frame"]
ost_frame_tensor = data["ost_frame"]
# im_tensor_ost = data["frame"]
# im_tensor = sail.BMImage(self.handle, input_shape[2], input_shape[3], sail.Format.FORMAT_BGR_PLANAR, sail.ImgDtype.DATA_TYPE_EXT_1N_BYTE)
# self.bmcv.vpp_resize(im_tensor_ost, im_tensor, self.input_w, self.input_h)
im_id = data["id"]
frame_number = data["frame_number"]
self.queue_mutex.release()
self.im_scale_list.append(data["im_scale"])
# print("im_tensor_ost.format(): {}".format(ost_frame_tensor.format()))
# print(" im_tensor.format(): {}".format(im_tensor.format()))
self.ost_frame_list.append(ost_frame_tensor)
self.img_list.append(im_tensor)
self.frame_number.append(frame_number)
# print(" im_tensor.format(): {}".format(im_tensor.format()))
self.img_id_list.append(im_id)
tmp_img[i] = im_tensor.data()
i += 1
if i > 3:
break
self.bmcv.convert_to(tmp_img, output, ((ab[0], ab[1]),(ab[2], ab[3]),(ab[4], ab[5])))
self.bmcv.bm_image_to_tensor(output, input)
t1 = time.time()
output_tensors = {}
for a in output_name:
output_dtype = self.engine.get_output_dtype(self.graph_name, a)
output_shape = [1, 1, 400, 7]
output_tensor = sail.Tensor(self.handle, output_shape, output_dtype, True, True)
output_tensors[a] = output_tensor
self.postprocess(input, output_tensors)
num.value +=4.0
def bbox_pred(self, boxes, box_deltas):
"""
Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
:param boxes: !important [N 4]
:param box_deltas: [N, 4 * num_classes]
:return: [N 4 * num_classes]
"""
t = time.time()
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
        boxes = boxes.astype(float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
dx = box_deltas[:, 0:1]
dy = box_deltas[:, 1:2]
dw = box_deltas[:, 2:3]
dh = box_deltas[:, 3:4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
#print('[bbox_pred] 1 {}'.format((time.time() - t) * 1000))
t = time.time()
# x1
pred_boxes[:, 0:1] = pred_ctr_x - 0.5 * (pred_w - 1.0)
# y1
pred_boxes[:, 1:2] = pred_ctr_y - 0.5 * (pred_h - 1.0)
# x2
pred_boxes[:, 2:3] = pred_ctr_x + 0.5 * (pred_w - 1.0)
# y2
pred_boxes[:, 3:4] = pred_ctr_y + 0.5 * (pred_h - 1.0)
if box_deltas.shape[1] > 4:
pred_boxes[:, 4:] = box_deltas[:, 4:]
#print('[bbox_pred] {}'.format(pred_boxes))
return pred_boxes
def gen_anchors(self):
ctx_id = 0
self.nms_threshold = 0.4
fpn_keys = []
_ratio = (1.,)
self._feat_stride_fpn = [32, 16, 8]
height_list = [14, 28, 56]
width_list = [25, 50, 100]
anchor_cfg = {
'32': {
'SCALES': (32, 16),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'16': {
'SCALES': (8, 4),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'8': {
'SCALES': (2, 1),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
}
}
for s in self._feat_stride_fpn:
fpn_keys.append('stride%s' % s)
dense_anchor = False
_anchors_fpn = dict(
zip(
fpn_keys,
generate_anchors_fpn(dense_anchor=dense_anchor,
cfg=anchor_cfg)))
for k in _anchors_fpn:
v = _anchors_fpn[k].astype(np.float32)
_anchors_fpn[k] = v
self._num_anchors = dict(
zip(fpn_keys,
[anchors.shape[0] for anchors in _anchors_fpn.values()]))
self.nms = gpu_nms_wrapper(self.nms_threshold, ctx_id)
for _idx, s in enumerate(self._feat_stride_fpn):
stride = int(s)
height = height_list[_idx]
width = width_list[_idx]
A = self._num_anchors['stride%s' % s]
K = height * width
anchors_fpn = _anchors_fpn['stride%s' % s]
anchors = anchors_plane(height, width, stride, anchors_fpn)
anchors = anchors.reshape((K * A, 4))
self.anchors_list[s] = anchors
#print('[gen_anchors] {}'.format(self.anchors_list))
def parse_net_out(self, net_out):
#ctx_id = 0
decay4 = 0.5
#nms_threshold = 0.4
#vote = False
#nocrop = False
#fpn_keys = []
#anchor_cfg = None
#preprocess = False
_ratio = (1., )
#_feat_stride_fpn = [32, 16, 8]
im_info = [self.input_h, self.input_w]
#im_scale = 0.416666
'''
anchor_cfg = {
'32': {
'SCALES': (32, 16),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'16': {
'SCALES': (8, 4),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'8': {
'SCALES': (2, 1),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
}
}
'''
cascade = 0
bbox_stds = [1.0, 1.0, 1.0, 1.0]
det_list = []
proposals_list = []
scores_list = []
strides_list = []
threshold=0.5
sym_idx = 0
bt = time.time()
for c_idx in range(4):
im_scale = self.im_scale_list[c_idx]
for _idx, s in enumerate(self._feat_stride_fpn):
_key = 'stride%s' % s
stride = int(s)
is_cascade = False
if cascade:
is_cascade = True
#scores = np.expand_dims(net_out['face_rpn_cls_prob_reshape_' + _key + '_output'][c_idx,:,:,:], axis=0)
scores = np.expand_dims(net_out['rpn_cls_prob_reshape_' + _key + '_output'][c_idx,:,:,:], axis=0)
scores = scores[:, self._num_anchors['stride%s' % s]:, :, :]
#bbox_deltas = np.expand_dims(net_out['face_rpn_bbox_pred_' + _key + '_output'][c_idx,:,:,:], axis=0)
#height, width = bbox_deltas.shape[2], bbox_deltas.shape[3]
A = self._num_anchors['stride%s' % s]
#K = height * width
#anchors_fpn = _anchors_fpn['stride%s' % s]
#anchors = anchors_plane(height, width, stride, anchors_fpn)
#anchors = anchors.reshape((K * A, 4))
#print('[parse_net_out] anchors {}'.format(anchors))
scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
if stride == 4 and decay4 < 1.0:
scores *= decay4
scores_ravel = scores.ravel()
order = np.where(scores_ravel >= threshold)[0]
#print('[parse_net_out] order {}'.format(order))
scores = scores[order]
if len(scores) == 0:
#scores_list.append([])
#proposals_list.append([])
continue
bbox_deltas = np.expand_dims(net_out['rpn_bbox_pred_' + _key + '_output'][c_idx, :, :, :], axis=0)
anchors = self.anchors_list[s][order, :]
bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1))
bbox_pred_len = bbox_deltas.shape[3] // A
bbox_deltas = bbox_deltas.reshape((-1, bbox_pred_len))
bbox_deltas = bbox_deltas[order, :]
bbox_deltas[:, 0::4] = bbox_deltas[:, 0::4] * bbox_stds[0]
bbox_deltas[:, 1::4] = bbox_deltas[:, 1::4] * bbox_stds[1]
bbox_deltas[:, 2::4] = bbox_deltas[:, 2::4] * bbox_stds[2]
bbox_deltas[:, 3::4] = bbox_deltas[:, 3::4] * bbox_stds[3]
proposals = self.bbox_pred(anchors, bbox_deltas)
proposals = clip_boxes(proposals, im_info)
#proposals = proposals[order, :]
proposals[:, 0:4] /= im_scale
proposals_list.append(proposals)
scores_list.append(scores)
if self.nms_threshold < 0.0:
_strides = np.empty(shape=(scores.shape),
dtype=np.float32)
_strides.fill(stride)
strides_list.append(_strides)
sym_idx += 2
if len(proposals_list) == 0:
det_list.append([])
continue
proposals = np.vstack(proposals_list)
scores = np.vstack(scores_list)
scores_ravel = scores.ravel()
order = scores_ravel.argsort()[::-1]
proposals = proposals[order, :]
scores = scores[order]
pre_det = np.hstack((proposals[:, 0:4], scores)).astype(np.float32, copy=False)
t0 = time.time()
keep = self.nms(pre_det)
#print('c_idx:{} nms {} ms'.format(c_idx, (time.time()-t0)*1000))
det = np.hstack((pre_det, proposals[:, 4:]))
det = det[keep, :]
det_list.append(det)
#print('total cost {} ms'.format((time.time()-bt)*1000))
return det_list
|
freqperyear.py
|
import argparse
import os
import random
import collections
from Queue import Empty
from multiprocessing import Process, Queue
from nltk.corpus import stopwords
import ioutils
from cooccurrence import matstore
def merge(years, out_pref, out_dir):
word_freqs = collections.defaultdict(dict)
word_lists = {}
word_set = set([])
for year in years:
word_lists[year] = ioutils.load_pickle(out_dir + str(year) + "tmp.pkl")
word_set = word_set.union(set(word_lists[year]))
os.remove(out_dir + str(year) + "tmp.pkl")
for year in years:
year_freqs= ioutils.load_pickle(out_dir + str(year) + "freqstmp.pkl")
for word in word_set:
if word not in year_freqs:
word_freqs[word][year] = float('nan')
else:
word_freqs[word][year] = year_freqs[word]
os.remove(out_dir + str(year) + "freqstmp.pkl")
ioutils.write_pickle(word_freqs, out_pref + "-freqs.pkl")
ioutils.write_pickle(word_lists, out_pref + ".pkl")
def main(proc_num, queue, out_pref, out_dir, in_dir, index, freq_thresh, lang):
random.shuffle(years)
print proc_num, "Start loop"
while True:
try:
year = queue.get(block=False)
except Empty:
print proc_num, "Finished"
break
stop_set = set(stopwords.words(lang))
word_freqs = {}
print "Loading mat for year", year
year_mat = matstore.retrieve_mat_as_coo(in_dir + str(year) + ".bin")
year_mat = year_mat.tocsr()
year_mat = year_mat / year_mat.sum()
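        # Normalizing by the total count turns the co-occurrence matrix into proportions, so a
        # word's row sum below approximates its relative frequency for that year.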
print "Processing data for year", year
for word_i in xrange(year_mat.shape[0]):
word = index[word_i]
if not word.isalpha() or word in stop_set or len(word) == 1:
continue
year_freq = year_mat[word_i, :].sum()
word_freqs[word] = year_freq
print "Writing data"
sorted_list = sorted(word_freqs.keys(), key = lambda key : word_freqs[key], reverse=True)
sorted_list = [word for word in sorted_list
if word_freqs[word] > freq_thresh]
ioutils.write_pickle(sorted_list, out_dir + str(year) + "tmp.pkl")
ioutils.write_pickle(word_freqs, out_dir + str(year) + "freqstmp.pkl")
def run_parallel(num_procs, years, out_pref, out_dir, in_dir, index, freq_thresh, lang):
queue = Queue()
for year in years:
queue.put(year)
procs = [Process(target=main, args=[i, queue, out_pref, out_dir, in_dir, index, freq_thresh, lang]) for i in range(num_procs)]
for p in procs:
p.start()
for p in procs:
p.join()
merge(years, out_pref, out_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get yearly sorted by-frequency list of (non-stop) words and dicts with their frequencies")
parser.add_argument("out_dir", help="output directory")
parser.add_argument("in_dir", help="directory with 5 grams and index")
parser.add_argument("num_procs", type=int, help="num procs")
parser.add_argument("--start-year", type=int, default=1900, help="start year (inclusive)")
parser.add_argument("--end-year", type=int, default=2000, help="end year (inclusive)")
parser.add_argument("--freq-thresh", type=int, default=7, help="frequency threshold (neg. power of 10)")
parser.add_argument("--lang", type=str, default="english", help="language")
args = parser.parse_args()
years = range(args.start_year, args.end_year + 1)
index = ioutils.load_pickle(args.in_dir + "/merged_list.pkl")
out_pref = args.out_dir + "/freqnonstop_peryear-" + str(years[0]) + "-" + str(years[-1]) + "-" + str(args.freq_thresh)
freq_thresh = 10.0 ** (-1.0 * float(args.freq_thresh))
run_parallel(args.num_procs, years, out_pref , args.out_dir + "/", args.in_dir + "/", index, freq_thresh, args.lang)
|
telemetry.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import threading
from knack.log import get_logger
from azext_devops.devops_sdk.v5_0.customer_intelligence.models import CustomerIntelligenceEvent
logger = get_logger(__name__)
vsts_tracking_data = CustomerIntelligenceEvent()
def init_telemetry():
global vsts_tracking_data # pylint: disable=global-statement
if vsts_tracking_data is None:
vsts_tracking_data = CustomerIntelligenceEvent()
if vsts_tracking_data.properties is None:
vsts_tracking_data.properties = {}
def try_send_telemetry_data(organization):
try:
if _is_telemetry_enabled():
logger.debug('Azure devops telemetry enabled.')
_try_send_tracking_ci_event_async(organization)
else:
logger.debug('Azure devops telemetry disabled.')
except BaseException as ex: # pylint: disable=broad-except
logger.debug(ex, exc_info=True)
logger.debug('Azure devops telemetry sending failed.')
def set_tracking_data(**kwargs):
init_telemetry()
try:
vsts_tracking_data.area = 'AzureDevopsCli'
vsts_tracking_data.properties = {}
command_line_args = vars(kwargs.get('args', None))
command_line_split = command_line_args['command'].split()
vsts_tracking_data.feature = command_line_split[0]
if len(command_line_split) > 1:
vsts_tracking_data.properties['Command'] = ' '.join(command_line_split[1:])
args = []
for key, value in command_line_args.items():
if value and isinstance(value, str) and not key.startswith('_') and key != 'command':
args.append(key)
vsts_tracking_data.properties['Args'] = ' '.join(args)
vsts_tracking_data.properties['ShellType'] = _get_shell_type()
import sys
vsts_tracking_data.properties['IsInteractive'] = str(sys.stdin.isatty())
vsts_tracking_data.properties['OutputType'] = command_line_args['_output_format']
except BaseException as ex: # pylint: disable=broad-except
logger.debug(ex, exc_info=True)
def _is_telemetry_enabled():
from azure.cli.core.telemetry import is_telemetry_enabled
return is_telemetry_enabled()
def _try_send_tracking_ci_event_async(organization=None):
if (vsts_tracking_data is not None and vsts_tracking_data.area is not None and
vsts_tracking_data.feature is not None):
logger.debug("Logging telemetry to azure devops server.")
try:
thread = threading.Thread(target=_send_tracking_ci_event, args=[organization])
thread.start()
except BaseException as ex: # pylint: disable=broad-except
# we should always continue if we fail to set tracking data
logger.debug(ex, exc_info=True)
else:
logger.debug("Skipping telemetry to azure devops server.")
def _send_tracking_ci_event(organization=None, ci_client=None):
from .services import get_ci_client
if ci_client is None:
ci_client = get_ci_client(organization=organization)
try:
ci_client.publish_events([vsts_tracking_data])
return True
except BaseException as ex: # pylint: disable=broad-except
logger.debug(ex, exc_info=True)
return False
# azure cli uses this to get shell type from os environment
def _get_shell_type():
import os
if 'ZSH_VERSION' in os.environ:
return 'zsh'
if 'BASH_VERSION' in os.environ:
return 'bash'
if 'KSH_VERSION' in os.environ or 'FCEDIT' in os.environ:
return 'ksh'
if 'WINDIR' in os.environ:
return 'cmd'
return _remove_cmd_chars(_remove_symbols(os.environ.get('SHELL')))
def _remove_cmd_chars(s):
if isinstance(s, str):
return s.replace("'", '_').replace('"', '_').replace('\r\n', ' ').replace('\n', ' ')
return s
def _remove_symbols(s):
if isinstance(s, str):
for c in '$%^&|':
s = s.replace(c, '_')
return s
|
dl_yaani24.py
|
from ._utils import *
baseURL = 'https://yaani24.net'
async def GetOneVideoURL(aniLink, loop):
soup = await GetSoup(aniLink, referer=baseURL, loop=loop)
epiTitle = soup.find('div', {'class':'view_info_box'}).find_all('div')[0].text
bigTitle = ' '.join(epiTitle.split(' ')[0:-1])
link = soup.find('video', {'id':'video'})['src']
return [bigTitle, {epiTitle:link}]
async def GetVideosURL(aniLink, loop):
ListOfVideosURL = {}
soup = await GetSoup(aniLink, referer=baseURL, loop=loop)
info = soup.find('div', {'class':'ani_video_list'}).find_all('a')
bigTitle = soup.h1.text
epiUrls = [baseURL + i['href'] for i in info]
epiTitles = [i.img['alt'] for i in info]
tasks = [asyncio.ensure_future(GetSoup(u, referer=baseURL, loop=loop)) for u in epiUrls]
ySoup = await asyncio.gather(*tasks)
links = [y.find('video', {'id':'video'})['src'] for y in ySoup]
for idx in range(len(links)):
ListOfVideosURL[epiTitles[idx]] = links[idx]
return [bigTitle, ListOfVideosURL]
async def main(aniLink, loop):
if 'ani_view' in aniLink:
g = await GetOneVideoURL(aniLink, loop)
else:
g = await GetVideosURL(aniLink, loop)
aniTitle = g[0]
aniVideoList = g[1]
dirLoc = '[yaani24]' + GetFileName(aniTitle)
MakeDirectory(f'./{download_folder}/{dirLoc}')
thrList = []
    for k, v in aniVideoList.items():
        fname = f'./{download_folder}/{dirLoc}/{GetFileName(k)}.mp4'
        if not isfile(fname):
            thrList.append(
                Thread(target=BigFileDownload, args=(fname, v, baseURL))
            )
    # Start all downloads in parallel, then wait for them all to finish.
    for thr in thrList:
        thr.start()
    for thr in thrList:
        thr.join()
def run(aniLink):
loop = asyncio.new_event_loop()
loop.run_until_complete(main(aniLink, loop))
loop.close()
|
plot_emgs.py
|
import pygame
from pygame.locals import *
import multiprocessing
from pyomyo import Myo, emg_mode
# ------------ Myo Setup ---------------
q = multiprocessing.Queue()
def worker(q):
m = Myo(mode=emg_mode.RAW)
m.connect()
def add_to_queue(emg, movement):
q.put(emg)
m.add_emg_handler(add_to_queue)
def print_battery(bat):
print("Battery level:", bat)
m.add_battery_handler(print_battery)
# Orange logo and bar LEDs
m.set_leds([128, 0, 0], [128, 0, 0])
# Vibrate to know we connected okay
m.vibrate(1)
"""worker function"""
while True:
m.run()
print("Worker Stopped")
last_vals = None
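# plot() scrolls the existing pygame surface left by D pixels and draws the newest EMG samples
# in the freed strip, one horizontal band per channel. The main loop below feeds it values
# scaled by 1/500, an assumed rough normalization for the RAW EMG range.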
def plot(scr, vals):
DRAW_LINES = True
global last_vals
if last_vals is None:
last_vals = vals
return
D = 5
scr.scroll(-D)
scr.fill((0, 0, 0), (w - D, 0, w, h))
for i, (u, v) in enumerate(zip(last_vals, vals)):
if DRAW_LINES:
pygame.draw.line(scr, (0, 255, 0),
(w - D, int(h/9 * (i+1 - u))),
(w, int(h/9 * (i+1 - v))))
pygame.draw.line(scr, (255, 255, 255),
(w - D, int(h/9 * (i+1))),
(w, int(h/9 * (i+1))))
else:
c = int(255 * max(0, min(1, v)))
scr.fill((c, c, c), (w - D, i * h / 8, D, (i + 1) * h / 8 - i * h / 8))
pygame.display.flip()
last_vals = vals
# -------- Main Program Loop -----------
if __name__ == "__main__":
p = multiprocessing.Process(target=worker, args=(q,))
p.start()
w, h = 800, 600
scr = pygame.display.set_mode((w, h))
try:
while True:
while not(q.empty()):
emg = list(q.get())
plot(scr, [e / 500. for e in emg])
print(emg)
except KeyboardInterrupt:
print("Quitting")
pygame.quit()
quit()
|
manager.py
|
#!/usr/bin/env python3
import datetime
import os
import signal
import sys
import traceback
from multiprocessing.context import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.params import Params
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, TICI
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.registration import register
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import dirty, version
from selfdrive.hardware.eon.apk import system
def manager_init():
# update system time from panda
set_time(cloudlog)
params = Params()
params.manager_start()
default_params = [
("CompletedTrainingVersion", "0"),
("HasAcceptedTerms", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("IsMetric", "1"),
# HKG
("UseClusterSpeed", "1"),
("LongControlEnabled", "0"),
("MadModeEnabled", "1"),
("AutoLaneChangeEnabled", "0"),
("SccSmootherSlowOnCurves", "0"),
("SccSmootherSyncGasPressed", "0"),
("ShowDebugUI", "0")
]
if TICI:
default_params.append(("IsUploadRawEnabled", "1"))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
os.umask(0) # Make sure we can create files with 777 permissions
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog and loggerd
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
device=HARDWARE.get_device_type())
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, device=HARDWARE.get_device_type())
def manager_prepare():
for p in managed_processes.values():
p.prepare()
def manager_cleanup():
for p in managed_processes.values():
p.stop()
cloudlog.info("everything is dead")
def manager_thread():
Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
system("am startservice com.neokii.optool/.MainService")
system("am startservice com.neokii.openpilot/.MainService")
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
#subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
ignore = []
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
if os.getenv("BLOCK") is not None:
ignore += os.getenv("BLOCK").split(",")
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
params = Params()
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
while True:
sm.update()
not_run = ignore[:]
if sm['deviceState'].freeSpacePercent < 5:
not_run.append("loggerd")
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
if started_prev and not started and 'updated' in managed_processes:
os.sync()
managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc]
cloudlog.debug(' '.join(running_list))
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# TODO: let UI handle this
# Exit main loop when uninstall is needed
if params.get_bool("DoUninstall"):
break
def main():
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
manager_cleanup()
if Params().get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
client.py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import socket
import threading
def client_send(tcpSock):
    while True:
        try:
            sendInfo = input('')
            tcpSock.send(sendInfo.encode('utf-8'))
        except (OSError, EOFError):
            break   # socket closed or stdin ended, stop sending
def client_recv(tcpSock):
    while True:
        try:
            recvInfo = tcpSock.recv(1024)
            if not recvInfo:            # empty bytes: the server closed the connection
                print('Connection closed by server.')
                break
            print('\r>> %s\r\n<< ' % recvInfo.decode('utf-8'), end='')
        except OSError:
            break
def main():
    serverIp = input('Enter the server IP to connect to: ')
    serverPort = int(input('Enter the port number to use: '))
tcpSock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
tcpSock.connect((serverIp, serverPort))
except:
print('ERROR: Cannot connect. ')
return
th_send=threading.Thread(target = client_send, args = (tcpSock, ))
th_recv=threading.Thread(target = client_recv, args = (tcpSock, ))
th_send.start()
th_recv.start()
th_send.join()
th_recv.join()
if __name__ == '__main__':
main()
|
CCTV.py
|
import logging
import threading
import time
import datetime
import picamera
import keyboard
import cv2
camera = picamera.PiCamera()
savepath = '/home/pi/bestwo/python3/savedVideo'
def thread_function(name):
now = time.strftime('%H-%M-%S',time.localtime(time.time()))
print(now)
camera.start_recording(output = savepath + '/video' +now+ '.h264')
camera.start_preview(fullscreen=False, window=(100,20,640,480))
while True:
nowsec=time.strftime('%S', time.localtime(time.time()))
print(nowsec)
time.sleep(1)
if nowsec=='59':
break
camera.stop_preview()
camera.stop_recording()
#1st recording trial done. and a loop starts
while True:
while True:
nowsec=time.strftime('%S', time.localtime(time.time()))
print(nowsec)
time.sleep(1)
if nowsec=='00':
break
        now = time.strftime('%H-%M-%S', time.localtime(time.time()))   # refresh the timestamp so each segment gets its own file name
        camera.start_recording(output = savepath + '/video' +now+ '.h264')
camera.start_preview(fullscreen=False, window=(100,20,640,480))
while True:
nowsec=time.strftime('%S', time.localtime(time.time()))
print(nowsec)
time.sleep(0.5)
if nowsec=='59':
break
camera.stop_preview()
camera.stop_recording()
## while True:
## now = time.strftime('%H-%M-%s',time.localtime(time.time()))
## nowsec=time.strftime('%S', time.localtime(time.time()))
## print("Thread %s: starting", name)
## camera.start_recording(output = savepath + '/video' +now+ '.h264')
## camera.start_preview(fullscreen=False, window=(100,20,640,480))
## camera.wait_recording(20)
## print("Thread %s: finishing", name)
## camera.stop_preview()
## camera.stop_recording()
def CameraOn():
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format,level=logging.INFO,datefmt="%H:%M:%S")
print("Main : before creating thread")
x = threading.Thread(target=thread_function, args=(1,))
current=1
print("Main : before running thread")
x.start()
print("Main : wait for the thread to finish")
## x.join()
print("Main : all done")
CameraOn()
|
test.py
|
import unittest
import time
import requests
import multiprocessing
import logging
import datetime
import sys
logging.basicConfig(
handlers=[logging.StreamHandler(sys.stdout)],
level=logging.DEBUG,
format='%(asctime)s [%(threadName)s] [%(levelname)s] %(message)s'
)
import authenticated_test_service
import requests_auth
import requests_auth.errors as errors
logger = logging.getLogger(__name__)
class JsonTokenFileCacheTest(unittest.TestCase):
def setUp(self):
self._token_cache = requests_auth.JsonTokenFileCache(self.id() + '.cache')
def tearDown(self):
self._token_cache.clear()
def test_add_bearer_tokens(self):
expiry_in_1_hour = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
token1 = authenticated_test_service.create_token(expiry_in_1_hour)
self._token_cache.add_bearer_token('key1', token1)
expiry_in_2_hour = datetime.datetime.utcnow() + datetime.timedelta(hours=2)
token2 = authenticated_test_service.create_token(expiry_in_2_hour)
self._token_cache.add_bearer_token('key2', token2)
# Assert that tokens can be retrieved properly even after other token were inserted
self.assertEqual(self._token_cache.get_token('key1'), token1)
self.assertEqual(self._token_cache.get_token('key2'), token2)
# Assert that tokens are not removed from the cache on retrieval
self.assertEqual(self._token_cache.get_token('key1'), token1)
self.assertEqual(self._token_cache.get_token('key2'), token2)
def test_save_bearer_tokens(self):
expiry_in_1_hour = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
token1 = authenticated_test_service.create_token(expiry_in_1_hour)
self._token_cache.add_bearer_token('key1', token1)
expiry_in_2_hour = datetime.datetime.utcnow() + datetime.timedelta(hours=2)
token2 = authenticated_test_service.create_token(expiry_in_2_hour)
self._token_cache.add_bearer_token('key2', token2)
same_cache = requests_auth.JsonTokenFileCache(self.id() + '.cache')
self.assertEqual(same_cache.get_token('key1'), token1)
self.assertEqual(same_cache.get_token('key2'), token2)
def test_missing_token(self):
with self.assertRaises(errors.AuthenticationFailed):
self._token_cache.get_token('key1')
def test_missing_token_function(self):
expiry_in_1_hour = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
token = authenticated_test_service.create_token(expiry_in_1_hour)
retrieved_token = self._token_cache.get_token('key1', lambda: ('key1', token))
self.assertEqual(retrieved_token, token)
class AzureADTest(unittest.TestCase):
def test_corresponding_oauth2_implicit_flow_instance(self):
aad = requests_auth.AzureActiveDirectoryImplicit(
'45239d18-c68c-4c47-8bdd-ce71ea1d50cd',
'54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
)
self.assertRegex(aad.grant_details.url,
'https://login.microsoftonline.com/45239d18-c68c-4c47-8bdd-ce71ea1d50cd/oauth2/authorize?'
'client_id=54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
'&response_type=token'
'&state=900fe3bb417d9c729361548bc6d3f83ad881e0b030ac27b2b563ee44ddf563c368612e8ee5b483f43667e897c96551388f6dfbdef83558ba2d6367d3b40d0496'
'&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F'
'&nonce=%5B%27.*-.*-.*-.*-.*%27%5D'.replace('?', '\?'))
self.assertRegex(str(aad),
"OAuth2Implicit("
"'https://login.microsoftonline.com/45239d18-c68c-4c47-8bdd-ce71ea1d50cd/oauth2/authorize', "
"client_id='54239d18-c68c-4c47-8bdd-ce71ea1d50cd', "
"nonce='.*-.*-.*-.*-.*')".replace('(', '\(').replace(')', '\)'))
def test_corresponding_oauth2_implicit_flow_instance_using_helper(self):
aad = requests_auth.aad(
requests_auth.OAuth2Flow.Implicit,
'45239d18-c68c-4c47-8bdd-ce71ea1d50cd',
'54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
)
self.assertRegex(aad.grant_details.url,
'https://login.microsoftonline.com/45239d18-c68c-4c47-8bdd-ce71ea1d50cd/oauth2/authorize?'
'client_id=54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
'&response_type=token'
'&state=900fe3bb417d9c729361548bc6d3f83ad881e0b030ac27b2b563ee44ddf563c368612e8ee5b483f43667e897c96551388f6dfbdef83558ba2d6367d3b40d0496'
'&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F'
'&nonce=%5B%27.*-.*-.*-.*-.*%27%5D'.replace('?', '\?'))
self.assertRegex(str(aad),
"OAuth2Implicit("
"'https://login.microsoftonline.com/45239d18-c68c-4c47-8bdd-ce71ea1d50cd/oauth2/authorize', "
"client_id='54239d18-c68c-4c47-8bdd-ce71ea1d50cd', "
"nonce='.*-.*-.*-.*-.*')".replace('(', '\(').replace(')', '\)'))
def test_corresponding_oauth2_implicit_flow_id_token_instance(self):
aad = requests_auth.AzureActiveDirectoryImplicitIdToken(
'45239d18-c68c-4c47-8bdd-ce71ea1d50cd',
'54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
)
self.assertRegex(aad.grant_details.url,
'https://login.microsoftonline.com/45239d18-c68c-4c47-8bdd-ce71ea1d50cd/oauth2/authorize?'
'client_id=54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
'&response_type=id_token'
'&state=c141cf16f45343f37ca8053b6d0c67bad30a777b00221132d5a4514dd23082994e553a9f9fb45224ab9c2da3380047b32948fc2bf233efddc2fbd5801fc1d2d9'
'&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F'
'&nonce=%5B%27.*-.*-.*-.*-.*%27%5D'.replace('?', '\?'))
self.assertRegex(str(aad),
"OAuth2Implicit("
"'https://login.microsoftonline.com/45239d18-c68c-4c47-8bdd-ce71ea1d50cd/oauth2/authorize', "
"client_id='54239d18-c68c-4c47-8bdd-ce71ea1d50cd', "
"response_type='id_token', "
"token_field_name='id_token', "
"nonce='.*-.*-.*-.*-.*')".replace('(', '\(').replace(')', '\)'))
class OktaTest(unittest.TestCase):
def test_corresponding_oauth2_implicit_flow_instance(self):
okta = requests_auth.OktaImplicit(
'testserver.okta-emea.com',
'54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
)
self.assertRegex(okta.grant_details.url,
'https://testserver.okta-emea.com/oauth2/v1/authorize?'
'client_id=54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
'&scope=openid+profile+email'
'&response_type=token'
'&state=f52217fda42a2089f9624cd7a36bb15bff1fb713144cbefbf3ace96c06b0adff46f854c803a41aa09b4b8a6fedf188f4d0ce3f84a6164a6a5db1cd7c004f9d91'
'&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F'
'&nonce=%5B%27.*-.*-.*-.*-.*%27%5D'.replace('?', '\?').replace('+', '\+'))
self.assertRegex(str(okta),
"OAuth2Implicit("
"'https://testserver.okta-emea.com/oauth2/v1/authorize', "
"client_id='54239d18-c68c-4c47-8bdd-ce71ea1d50cd', "
"nonce='.*-.*-.*-.*-.*', "
"scope='openid profile email')".replace('(', '\(').replace(')', '\)'))
def test_corresponding_oauth2_implicit_flow_instance_using_helper(self):
okta = requests_auth.okta(
requests_auth.OAuth2Flow.Implicit,
'testserver.okta-emea.com',
'54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
)
self.assertRegex(okta.grant_details.url,
'https://testserver.okta-emea.com/oauth2/v1/authorize?'
'client_id=54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
'&scope=openid+profile+email'
'&response_type=token'
'&state=f52217fda42a2089f9624cd7a36bb15bff1fb713144cbefbf3ace96c06b0adff46f854c803a41aa09b4b8a6fedf188f4d0ce3f84a6164a6a5db1cd7c004f9d91'
'&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F'
'&nonce=%5B%27.*-.*-.*-.*-.*%27%5D'.replace('?', '\?').replace('+', '\+'))
self.assertRegex(str(okta),
"OAuth2Implicit("
"'https://testserver.okta-emea.com/oauth2/v1/authorize', "
"client_id='54239d18-c68c-4c47-8bdd-ce71ea1d50cd', "
"nonce='.*-.*-.*-.*-.*', "
"scope='openid profile email')".replace('(', '\(').replace(')', '\)'))
def test_corresponding_oauth2_implicit_flow_id_token_instance(self):
okta = requests_auth.OktaImplicitIdToken(
'testserver.okta-emea.com',
'54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
)
self.assertRegex(okta.grant_details.url,
'https://testserver.okta-emea.com/oauth2/v1/authorize?'
'client_id=54239d18-c68c-4c47-8bdd-ce71ea1d50cd'
'&response_type=id_token'
'&scope=openid+profile+email'
'&state=da5a9f82a677a9b3bf19ce2f063f336f1968b8960d4626b35f7d4c0aee68e48ae1a5d5994dc78c3deb043d0e431c5be0bb084c8ac39bd41d670780306329d5a8'
'&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F'
'&nonce=%5B%27.*-.*-.*-.*-.*%27%5D'.replace('?', '\?').replace('+', '\+'))
self.assertRegex(str(okta),
"OAuth2Implicit("
"'https://testserver.okta-emea.com/oauth2/v1/authorize', "
"client_id='54239d18-c68c-4c47-8bdd-ce71ea1d50cd', "
"response_type='id_token', "
"token_field_name='id_token', "
"nonce='.*-.*-.*-.*-.*', "
"scope='openid profile email')".replace('(', '\(').replace(')', '\)'))
TEST_SERVICE_PORT = 5001 # TODO Should use a method to retrieve a free port instead
TEST_SERVICE_HOST = 'http://localhost:{0}'.format(TEST_SERVICE_PORT)
TIMEOUT = 10
def call(auth):
return requests.get(TEST_SERVICE_HOST + '/get_headers', auth=auth, timeout=TIMEOUT)
def get_header(auth):
response = requests.get(TEST_SERVICE_HOST + '/get_headers', auth=auth, timeout=TIMEOUT)
response.raise_for_status()
return dict(response.json())
def get_query_args(auth):
response = requests.get(TEST_SERVICE_HOST + '/get_query_args', auth=auth, timeout=TIMEOUT)
response.raise_for_status()
return response.json()
def can_connect_to_server():
try:
resp = requests.get('http://localhost:{0}/status'.format(TEST_SERVICE_PORT), timeout=0.5)
return resp.status_code == 200
except:
return False
def _wait_for_server_to_be_started():
for attempt in range(3):
if can_connect_to_server():
logger.info('Test server is started')
break
logger.info('Test server still not started...')
else:
raise Exception('Test server was not able to start.')
class AuthenticationTest(unittest.TestCase):
test_service_process = multiprocessing.Process(
target=authenticated_test_service.start_server, args=(TEST_SERVICE_PORT,)
)
@classmethod
def setUpClass(cls):
cls.test_service_process.start()
_wait_for_server_to_be_started()
@classmethod
def tearDownClass(cls):
cls.test_service_process.terminate()
cls.test_service_process.join(timeout=0.5)
def setUp(self):
requests_auth.OAuth2.token_cache.clear()
def test_oauth2_implicit_flow_url_is_mandatory(self):
with self.assertRaises(Exception) as cm:
requests_auth.OAuth2Implicit(None)
self.assertEqual(str(cm.exception), 'Authorization URL is mandatory.')
def test_oauth2_implicit_flow_token_is_not_reused_if_a_url_parameter_is_changing(self):
auth1 = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_custom_token?response_type=custom_token&fake_param=1',
timeout=TIMEOUT,
token_field_name='custom_token'
)
token_on_auth1 = get_header(auth1).get('Authorization')
self.assertRegex(token_on_auth1, '^Bearer .*')
# Ensure that the new generated token will be different than previous one
time.sleep(1)
logger.info('Requesting a custom token with a different parameter in URL.')
auth2 = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_custom_token?response_type=custom_token&fake_param=2',
timeout=TIMEOUT,
token_field_name='custom_token'
)
token_on_auth2 = get_header(auth2).get('Authorization')
self.assertRegex(token_on_auth2, '^Bearer .*')
self.assertNotEqual(token_on_auth1, token_on_auth2)
def test_oauth2_implicit_flow_token_is_reused_if_only_nonce_differs(self):
auth1 = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_custom_token?response_type=custom_token&nonce=1',
timeout=TIMEOUT,
token_field_name='custom_token'
)
token_on_auth1 = get_header(auth1).get('Authorization')
self.assertRegex(token_on_auth1, '^Bearer .*')
auth2 = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_custom_token?response_type=custom_token&nonce=2',
timeout=TIMEOUT,
token_field_name='custom_token'
)
token_on_auth2 = get_header(auth2).get('Authorization')
self.assertRegex(token_on_auth2, '^Bearer .*')
self.assertEqual(token_on_auth1, token_on_auth2)
def test_oauth2_implicit_flow_token_can_be_requested_on_a_custom_server_port(self):
auth = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_access_token',
# TODO Should use a method to retrieve a free port instead
redirect_uri_port=5002,
timeout=TIMEOUT
)
self.assertRegex(get_header(auth).get('Authorization'), '^Bearer .*')
def test_oauth2_implicit_flow_post_token_is_sent_in_authorization_header_by_default(self):
auth = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_access_token',
timeout=TIMEOUT
)
self.assertRegex(get_header(auth).get('Authorization'), '^Bearer .*')
def test_oauth2_implicit_flow_get_token_is_sent_in_authorization_header_by_default(self):
auth = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_anchor_access_token',
timeout=TIMEOUT
)
self.assertRegex(get_header(auth).get('Authorization'), '^Bearer .*')
def test_oauth2_implicit_flow_token_is_sent_in_requested_field(self):
auth = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_access_token',
timeout=TIMEOUT,
header_name='Bearer',
header_value='{token}'
)
self.assertIsNotNone(get_header(auth).get('Bearer'))
def test_oauth2_implicit_flow_can_send_a_custom_response_type_and_expects_token_to_be_received_with_this_name(self):
auth = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_custom_token',
timeout=TIMEOUT,
response_type='custom_token',
token_field_name='custom_token',
)
self.assertRegex(get_header(auth).get('Authorization'), '^Bearer .*')
def test_oauth2_implicit_flow_expects_token_in_id_token_if_response_type_is_id_token(self):
auth = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_id_token',
timeout=TIMEOUT,
response_type='id_token',
)
self.assertRegex(get_header(auth).get('Authorization'), '^Bearer .*')
def test_oauth2_implicit_flow_expects_token_in_id_token_if_response_type_in_url_is_id_token(self):
auth = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_id_token?response_type=id_token',
timeout=TIMEOUT,
)
self.assertRegex(get_header(auth).get('Authorization'), '^Bearer .*')
def test_oauth2_implicit_flow_expects_token_to_be_stored_in_access_token_by_default(self):
auth = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_access_token',
timeout=TIMEOUT
)
self.assertRegex(get_header(auth).get('Authorization'), '^Bearer .*')
def test_oauth2_implicit_flow_token_is_reused_if_not_expired(self):
auth1 = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_access_token',
timeout=TIMEOUT
)
token1 = get_header(auth1).get('Authorization')
self.assertRegex(token1, '^Bearer .*')
oauth2 = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_access_token',
timeout=TIMEOUT
)
token2 = get_header(oauth2).get('Authorization')
self.assertRegex(token2, '^Bearer .*')
# As the token should not be expired, this call should use the same token
self.assertEqual(token1, token2)
def test_oauth2_implicit_flow_post_failure_if_token_is_not_provided(self):
with self.assertRaises(Exception) as cm:
call(requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/do_not_provide_token',
timeout=TIMEOUT)
)
self.assertEqual('access_token not provided within {}.', str(cm.exception))
def test_oauth2_implicit_flow_get_failure_if_token_is_not_provided(self):
with self.assertRaises(Exception) as cm:
call(requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/do_not_provide_token_as_anchor_token',
timeout=TIMEOUT
))
self.assertEqual("access_token not provided within {}.", str(cm.exception))
def test_oauth2_implicit_flow_post_failure_if_state_is_not_provided(self):
with self.assertRaises(Exception) as cm:
call(requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_access_token_but_without_providing_state',
timeout=TIMEOUT
),)
self.assertRegex(str(cm.exception), "state not provided within {'access_token': \['.*'\]}.")
def test_oauth2_implicit_flow_get_failure_if_state_is_not_provided(self):
with self.assertRaises(Exception) as cm:
call(requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_token_as_anchor_access_token_but_without_providing_state',
timeout=TIMEOUT
),)
self.assertRegex(str(cm.exception), "state not provided within {'access_token': \['.*'\]}.")
def test_oauth2_implicit_flow_failure_if_token_is_not_received_within_the_timeout_interval(self):
with self.assertRaises(Exception) as cm:
call(requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/do_not_redirect',
timeout=TIMEOUT
))
self.assertEqual('User authentication was not received within {timeout} seconds.'.
format(timeout=TIMEOUT), str(cm.exception))
def test_oauth2_implicit_flow_token_is_requested_again_if_expired(self):
        # This token will expire in 1 second
auth1 = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_a_token_expiring_in_1_second',
timeout=TIMEOUT
)
token1 = get_header(auth1).get('Authorization')
self.assertRegex(token1, '^Bearer .*')
        # Wait for 2 seconds to ensure that the token expiring in 1 second will be considered expired
time.sleep(2)
# Token should now be expired, a new one should be requested
auth2 = requests_auth.OAuth2Implicit(
TEST_SERVICE_HOST + '/provide_a_token_expiring_in_1_second',
timeout=TIMEOUT
)
token2 = get_header(auth2).get('Authorization')
self.assertRegex(token2, '^Bearer .*')
self.assertNotEqual(token1, token2)
def test_oauth2_authorization_code_flow_get_code_is_sent_in_authorization_header_by_default(self):
auth = requests_auth.OAuth2AuthorizationCode(
TEST_SERVICE_HOST + '/provide_code_as_anchor_code',
TEST_SERVICE_HOST + '/provide_access_token',
timeout=TIMEOUT
)
self.assertRegex(get_header(auth).get('Authorization'), '^Bearer 2YotnFZFEjr1zCsicMWpAA')
def test_oauth2_password_credentials_flow_token_is_sent_in_authorization_header_by_default(self):
auth = requests_auth.OAuth2ResourceOwnerPasswordCredentials(
TEST_SERVICE_HOST + '/provide_access_token',
username='test_user',
password='test_pwd',
timeout=TIMEOUT
)
self.assertRegex(get_header(auth).get('Authorization'), '^Bearer 2YotnFZFEjr1zCsicMWpAA')
def test_oauth2_client_credentials_flow_token_is_sent_in_authorization_header_by_default(self):
auth = requests_auth.OAuth2ClientCredentials(
TEST_SERVICE_HOST + '/provide_access_token',
username='test_user',
password='test_pwd',
timeout=TIMEOUT
)
self.assertRegex(get_header(auth).get('Authorization'), '^Bearer 2YotnFZFEjr1zCsicMWpAA')
def test_header_api_key_requires_an_api_key(self):
with self.assertRaises(Exception) as cm:
requests_auth.HeaderApiKey(None)
self.assertEqual('API Key is mandatory.', str(cm.exception))
def test_query_api_key_requires_an_api_key(self):
with self.assertRaises(Exception) as cm:
requests_auth.QueryApiKey(None)
self.assertEqual('API Key is mandatory.', str(cm.exception))
def test_header_api_key_is_sent_in_X_Api_Key_by_default(self):
auth = requests_auth.HeaderApiKey('my_provided_api_key')
self.assertEqual(get_header(auth).get('X-Api-Key'), 'my_provided_api_key')
def test_query_api_key_is_sent_in_api_key_by_default(self):
auth = requests_auth.QueryApiKey('my_provided_api_key')
self.assertEqual(get_query_args(auth).get('api_key'), 'my_provided_api_key')
def test_header_api_key_can_be_sent_in_a_custom_field_name(self):
auth = requests_auth.HeaderApiKey('my_provided_api_key', 'X-API-HEADER-KEY')
self.assertEqual(get_header(auth).get('X-Api-Header-Key'), 'my_provided_api_key')
def test_query_api_key_can_be_sent_in_a_custom_field_name(self):
auth = requests_auth.QueryApiKey('my_provided_api_key', 'X-API-QUERY-KEY')
self.assertEqual(get_query_args(auth).get('X-API-QUERY-KEY'), 'my_provided_api_key')
def test_basic_authentication_send_authorization_header(self):
auth = requests_auth.Basic('test_user', 'test_pwd')
self.assertEqual(get_header(auth).get('Authorization'), 'Basic dGVzdF91c2VyOnRlc3RfcHdk')
def test_basic_and_api_key_authentication_can_be_combined(self):
basic_auth = requests_auth.Basic('test_user', 'test_pwd')
api_key_auth = requests_auth.HeaderApiKey('my_provided_api_key')
header = get_header(basic_auth + api_key_auth)
self.assertEqual(header.get('Authorization'), 'Basic dGVzdF91c2VyOnRlc3RfcHdk')
self.assertEqual(header.get('X-Api-Key'), 'my_provided_api_key')
def test_basic_and_api_key_authentication_can_be_combined_deprecated(self):
basic_auth = requests_auth.Basic('test_user', 'test_pwd')
api_key_auth = requests_auth.HeaderApiKey('my_provided_api_key')
header = get_header(requests_auth.Auths(basic_auth, api_key_auth))
self.assertEqual(header.get('Authorization'), 'Basic dGVzdF91c2VyOnRlc3RfcHdk')
self.assertEqual(header.get('X-Api-Key'), 'my_provided_api_key')
if __name__ == '__main__':
unittest.main(buffer=False)
|
python实例手册.py
|
python实例手册
#encoding:utf8
# 设定编码-支持中文
0 说明
手册制作: 雪松 littlepy www.51reboot.com
更新日期: 2020-03-06
欢迎系统运维加入Q群: 198173206 # 加群请回答问题
欢迎运维开发加入Q群: 365534424 # 不定期技术分享
请使用"notepad++"或其它编辑器打开此文档, "alt+0"将函数折叠后方便查阅
请勿删除信息, 转载请说明出处, 抵制不道德行为
错误在所难免, 还望指正!
[python实例手册] [shell实例手册] [LazyManage运维批量管理(shell/python两个版本)]
github更新下载地址: https://github.com/liquanzhou/ops_doc
1 基础
安装python2.7
wget https://www.python.org/ftp/python/2.7.9/Python-2.7.9.tgz
tar xvf Python-2.7.9.tgz
cd Python-2.7.9
./configure --prefix=/usr/local/python27
make
make install
mv /usr/bin/python /usr/bin/python_old
ln -s /usr/local/python27/bin/python /usr/bin/python
python # 查看版本
解决YUM无法使用的问题
vim /usr/bin/yum
vim /usr/bin/repoquery
两文件首行#!/usr/bin/python 替换为老版本python #!/usr/bin/python2.6 注意可能为2.4
pip模块安装
yum install python-pip # centos安装pip
sudo apt-get install python-pip # ubuntu安装pip
pip官方安装脚本
wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py
python get-pip.py
pip编译安装
# https://pypi.python.org/pypi/setuptools
wget http://pypi.python.org/packages/source/s/setuptools/setuptools.tar.gz
tar zxvf setuptools.tar.gz
cd setuptools/
python setup.py build
python setup.py install
# https://pypi.python.org/pypi/ez_setup
tar zxvf ez_setup.tar.gz
cd ez_setup/
python setup.py build
python setup.py install
# https://pypi.python.org/pypi/pip
tar zxvf pip.tar.gz
cd pip/
python setup.py build
python setup.py install
加载环境变量
vim /etc/profile
export PATH=/usr/local/python27/bin:$PATH
. /etc/profile
pip freeze # 查看包版本
pip install -r file # 安装包文件列表
pip install Package # 安装包 pip install requests
pip show --files Package # 查看安装包时安装了哪些文件
pip list --outdated # 查看哪些包有更新
pip install --upgrade Package # 更新一个软件包
pip uninstall Package # 卸载软件包
pip list # 查看pip安装的包及版本
pip install django==1.5 # 指定版本安装
pip install kafka-python -i http://pypi.douban.com/simple --trusted-host pypi.douban.com
python3安装
yum install python36.x86_64 python36-pip
查看帮助
python -c "help('modules')" # 查看python所有模块
import os
for i in dir(os):
print i # 模块的方法
help(os.path) # 方法的帮助
python中关键字
import keyword
keyword.iskeyword(str) # 字符串是否为python关键字
keyword.kwlist # 返回pytho所有关键字
['and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'exec', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or', 'pass', 'print', 'raise', 'return', 'try', 'while', 'with', 'yield']
调试
python -m trace -t aaaaaa.py
strace -p pid # 用系统命令跟踪系统调用
变量
r=r'\n' # 输出时原型打印
u=u'中文' # 定义为unicode编码
global x # 全局变量
a = 0 or 2 or 1 # 布尔运算赋值,a值为True既不处理后面,a值为2. None、字符串''、空元组()、空列表[],空字典{}、0、空字符串都是false
name = raw_input("input:").strip() # 输入字符串变量
num = int(raw_input("input:").strip()) # 输入字符串str转为int型
locals() # 所有局部变量组成的字典
locals().values() # 所有局部变量值的列表
os.popen("date -d @{0} +'%Y-%m-%d %H:%M:%S'".format(12)).read() # 特殊情况引用变量 {0} 代表第一个参数
基于字典的字符串格式化
params = {"server":"mpilgrim", "database":"master", "uid":"sa", "pwd":"secret"}
"%(pwd)s" % params # 'secret'
"%(pwd)s is not a good password for %(uid)s" % params # 'secret is not a good password for sa'
"%(database)s of mind, %(database)s of body" % params # 'master of mind, master of body'
打印
# 字符串 %s 整数 %d 浮点 %f 原样打印 %r
print '字符串: %s 整数: %d 浮点: %f 原样打印: %r' % ('aa',2,1.0,'r')
print 'abc', # 有逗号,代表不换行打印,在次打印会接着本行打印
print '%-10s %s' % ('aaa','bbb') # 左对齐 占10个字符
print '%10s %s' % ('aaa','bbb') # 右对齐 占10个字符
列表
# 列表元素的个数最多 536870912
shoplist = ['apple', 'mango', 'carrot', 'banana']
shoplist[2] = 'aa'
del shoplist[0]
shoplist.insert(4,'www')
shoplist.append('aaa')
shoplist[::-1] # 倒着打印 对字符翻转串有效
shoplist[2::3] # 从第二个开始每隔三个打印
shoplist[:-1] # 排除最后一个
'\t'.join(li) # 将列表转字符串 用字表符分割
sys.path[1:1]=[5] # 在位置1前面插入列表中一个值
list(set(['qwe', 'as', '123', '123'])) # 将列表通过集合去重复
eval("['1','a']") # 将字符串当表达式求值,得到列表
# enumerate 可得到每个值的对应位置
for i, n in enumerate(['a','b','c']):
print i,n
元组
# 不可变
zoo = ('wolf', 'elephant', 'penguin')
字典
ab = { 'Swaroop' : 'swaroopch@byteofpython.info',
'Larry' : 'larry@wall.org',
}
ab['c'] = 80 # 添加字典元素
del ab['Larry'] # 删除字典元素
ab.keys() # 查看所有键值
ab.values() # 打印所有值
ab.has_key('a') # 查看键值是否存在
ab.items() # 返回整个字典列表
复制字典
a = {1: {1: 2, 3: 4}}
b = a
b[1][1] = 8888 # a和b都为 {1: {1: 8888, 3: 4}}
import copy
c = copy.deepcopy(a) # 再次赋值 b[1][1] = 9999 拷贝字典为新的字典,互不干扰
a[2] = copy.deepcopy(a[1]) # 复制出第二个key,互不影响 {1: {1: 2, 3: 4},2: {1: 2, 3: 4}}
迭代器
# 创建迭代接口,而不是原来的对象 支持字符串、列表和字典等序列对象
i = iter('abcd')
print i.next()
s = {'one':1,'two':2,'three':3}
m = iter(s)
print m.next() # 迭代key
流程结构
if判断
# 布尔值操作符 and or not 实现多重判断
if a == b:
print '=='
elif a < b:
print b
else:
print a
while循环
while True:
if a == b:
print "=="
break
print "!="
else:
print 'over'
count=0
while(count<9):
print count
count += 1
for循环
sorted() # 返回一个序列(列表)
zip() # 返回一个序列(列表)
enumerate() # 返回循环列表序列 for i,v in enumerate(['a','b']):
reversed() # 反序迭代器对象
dict.iterkeys() # 通过键迭代
dict.itervalues() # 通过值迭代
dict.iteritems() # 通过键-值对迭代
readline() # 文件迭代
iter(obj) # 得到obj迭代器 检查obj是不是一个序列
iter(a,b) # 重复调用a,直到迭代器的下一个值等于b
for i in range(1, 5):
print i
else:
print 'over'
list = ['a','b','c','b']
for i in range(len(list)):
print list[i]
for x, Lee in enumerate(list):
print "%d %s Lee" % (x+1,Lee)
# enumerate 使用函数得到索引值和对应值
for i, v in enumerate(['tic', 'tac', 'toe']):
print(i, v)
流程结构简写
[ i * 2 for i in [8,-2,5]]
[16,-4,10]
[ i for i in range(8) if i %2 == 0 ]
[0,2,4,6]
tab补全
# vim /usr/lib/python2.7/dist-packages/tab.py
# python startup file
import sys
import readline
import rlcompleter
import atexit
import os
# tab completion
readline.parse_and_bind('tab: complete')
# history file
histfile = os.path.join(os.environ['HOME'], '.pythonhistory')
try:
    readline.read_history_file(histfile)      # load previously saved history if present
except IOError:
    pass
atexit.register(readline.write_history_file, histfile)   # save history when the interpreter exits
函数
def printMax(a, b = 1):
if a > b:
print a
return a
else:
print b
return b
x = 5
y = 7
printMax(x, y)
def update(*args,**kwargs):
p=''
for i,t in kwargs.items():
p = p+ '%s=%s,' %(i,str(t))
sql = "update 'user' set (%s) where (%s)" %(args[0],p)
print sql
update('aaa',uu='uu',id=3)
模块
# Filename: mymodule.py
def sayhi():
print 'mymodule'
version = '0.1'
# 使用模块中方法
import mymodule
from mymodule import sayhi, version
mymodule.sayhi() # 使用模块中函数方法
装饰器
# 为已存在的功能添加额外的功能,只在初始化脚本的时候执行一次
#!/usr/bin/env python
def deco(func):
def wrapper(*args, **kwargs):
print "Wrap start"
func(*args, **kwargs)
func(*args, **kwargs)
print "Wrap end\n"
return wrapper
@deco
def foo(x):
print "In foo():"
print "I have a para: %s" % x
@deco
def foo_dict(x,z='dict_para'):
print "In foo_dict:"
print "I have two para, %s and %s" % (x, z)
if __name__ == "__main__":
# 装饰器 @deco 等价于 foo = deco(foo)
foo('x')
foo_dict('x', z='dict_para')
结果
Wrap start
In foo():
I have a para: x
In foo():
I have a para: x
Wrap end
Wrap start
In foo_dict:
I have two para, x and dict_para
In foo_dict:
I have two para, x and dict_para
Wrap end
类对象的方法
__xxx__ # 系统定义名字
__init__ # 实例化初始化类的方法
__all__ = ['xs'] # __all__ 用于模块import导入时限制,定义了只有all内指定的属性、方法、类可被导入,没定义则模块内的所有将被导入
_xxx # _开头的为私有类,只有类对象和子类对象自己能访问到这些变量 不能用 from module import * 导入 class _status:
__xxx # __开头的为类中的私有变量名,只有类对象自己能访问,连子类对象也不能访问到这个数据
class Person:
# 实例化初始化的方法
def __init__(self, name ,age):
self.name = name
self.age = age
print self.name
# 有self此函数为方法
def sayHi(self):
print 'Hello, my name is', self.name
# 对象消逝的时候被调用
def __del__(self):
print 'over'
# 实例化对象
p = Person('Swaroop',23)
# 使用对象方法
p.sayHi()
# 继承
class Teacher(Person):
def __init__(self, name, age, salary):
Person.__init__(self, name, age)
self.salary = salary
print '(Initialized Teacher: %s)' % self.name
def tell(self):
Person.tell(self)
print 'Salary: "%d"' % self.salary
t = Teacher('Mrs. Shrividya', 40, 30000)
getattr(object,name,default)
# 返回object的名称为name的属性的属性值,如果属性name存在,则直接返回其属性值.如果属性name不存在,则触发AttributeError异常或当可选参数default定义时返回default值
class A:
def __init__(self):
self.name = 'zhangjing'
def method(self):
print"method print"
Instance = A()
print getattr(Instance, 'name', 'not find') # 如果Instance 对象中有属性name则打印self.name的值,否则打印'not find'
print getattr(Instance, 'age', 'not find') # 如果Instance 对象中有属性age则打印self.age的值,否则打印'not find'
print getattr(Instance, 'method', 'default') # 如果有方法method则打印其地址,否则打印default
print getattr(Instance, 'method', 'default')() # 如果有方法method则调用并打印返回值None;若方法不存在,返回的字符串'default'不可调用,会报TypeError
setattr(object,name,value)
# 设置object的名称为name(type:string)的属性的属性值为value,属性name可以是已存在属性也可以是新属性。
#等同多次 self.name = name 赋值 在外部可以直接把变量和值对应关系传进去
#class Person:
# def __init__(self, name ,age):
# self.name = name
# self.age = age
config = {'name':'name','age':'age'}
class Configure(object):
def __init__(self, config):
self.register(config)
def register(self, config):
for key, value in config.items():
if key.upper() == key:
setattr(self, key, value)
模块包
# 文件 ops/fileserver/__init__.py
import readers
import writers
# 每个模块的包中,都有一个 __init__.py 文件,有了这个文件,才能导入这个目录下的module,在导入一个包时 import ops.fileserver ,实际上是导入了它的 __init__.py 文件,可以再 __init__.py 文件中再导入其他的包,或者模块。就不需要将所有的import语句写在一个文件里了,也可以减少代码量,不需要一个个去导入module了。
# __init__.py 有一个重要的变量 __all__ 。有时会需要全部导入,from PackageName import * ,这时 import 就会把注册在包 __init__.py 文件中 __all__ 列表中的子模块和子包导入到当前作用域中来。如:
__all__ = ["Module1", "Module2", "subPackage1", "subPackage2"]
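A minimal sketch (not part of the original handbook) of the package layout described above; the ops/fileserver names follow the example and only work if such a package actually exists on sys.path:
# Layout (illustrative):
#   ops/__init__.py
#   ops/fileserver/__init__.py   -> contains: import readers / import writers / __all__ = ['readers', 'writers']
#   ops/fileserver/readers.py
#   ops/fileserver/writers.py
import ops.fileserver              # importing the package executes ops/fileserver/__init__.py
from ops.fileserver import *      # star-import only brings in the names listed in __all__
print(ops.fileserver.readers)     # the sub-module imported by __init__.py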
执行模块类中的所有方法
# moniItems.py
import sys, time
import inspect
class mon:
def __init__(self):
self.data = dict()
def run(self):
return self.runAllGet()
def getDisk(self):
return 222
def getCpu(self):
return 111
def runAllGet(self):
for fun in inspect.getmembers(self, predicate=inspect.ismethod):
print fun[0], fun[1]
if fun[0][:3] == 'get':
self.data[fun[0][3:]] = fun[1]()
print self.data
return self.data
# 模块导入使用
from moniItems import mon
m = mon()
m.runAllGet()
文件处理
# 模式: 读'r' 写[清空整个文件]'w' 追加[文件需要存在]'a' 读写'r+' 二进制文件'b' 'rb','wb','rb+'
写文件
i={'ddd':'ccc'}
f = file('poem.txt', 'a')
f.write("string")
f.write(str(i))
f.flush()
f.close()
读文件
f = file('/etc/passwd','r')
c = f.read().strip() # 读取为一个大字符串,并去掉最后一个换行符
for i in c.split('\n'): # 用换行符切割字符串得到列表循环每行
print i
f.close()
读文件1
f = file('/etc/passwd','r')
while True:
line = f.readline() # 返回一行
if len(line) == 0:
break
x = line.split(":") # 冒号分割定义序列
#x = [ x for x in line.split(":") ] # 冒号分割定义序列
#x = [ x.split("/") for x in line.split(":") ] # 先冒号分割,在/分割 打印x[6][1]
print x[6],"\n",
f.close()
读文件2
f = file('/etc/passwd')
c = f.readlines() # 读入所有文件内容,可反复读取,大文件时占用内存较大
for line in c:
print line.rstrip(),
f.close()
读文件3
for i in open('b.txt'): # 直接读取也可迭代,并有利于大文件读取,但不可反复读取
print i,
追加日志
log = open('/home/peterli/xuesong','a')
print >> log,'faaa'
log.close()
with读文件
# 自动关闭文件、线程锁的自动获取和释放等
with open('a.txt') as f:
for i in f:
print i
print f.read() # 打印所有内容为字符串
print f.readlines() # 打印所有内容按行分割的列表
文件随机读写
# 文件本没有换行,一切都是字符,文件也没有插入功能
f.tell() # 当前读写位置
f.read(5) # 读取5个字符并改变指针
f.seek(5) # 改变用户态读写指针偏移位置,可做随机写
f.seek(p,0) # 移动当文件第p个字节处,绝对位置
f.seek(p,1) # 移动到相对于当前位置之后的p个字节
f.seek(p,2) # 移动到相对文件尾之后的p个字节
f.seek(0,2) # 指针指到尾部
# 改变指针超出文件尾部,会造成文件洞,ll看占用较大,但du -sh却非常小
f.read(65535) # 读取64K字节
f.write("str") # 写会覆盖当前指针后的响应字符,无插入功能
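A short sketch of the random-access calls above, assuming the poem.txt created in the earlier write example already exists:
f = open('poem.txt', 'r+')
print(f.read(5))       # read 5 characters; the pointer advances by 5
print(f.tell())        # 5
f.seek(0, 2)           # jump to the end of the file
f.write('tail')        # at the end this appends; in the middle it overwrites in place
f.seek(0)              # back to the beginning
print(f.read())
f.close()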
内建函数
dir(sys) # 显示对象的属性
help(sys) # 交互式帮助
int(obj) # 转型为整形
str(obj) # 转为字符串
len(obj) # 返回对象或序列长度
open(file,mode) # 打开文件 #mode (r 读,w 写, a追加)
range(0,3) # 返回一个整形列表
raw_input("str:") # 等待用户输入
type(obj) # 返回对象类型
abs(-22) # 绝对值
random # 随机数
choice() # 随机返回给定序列的一个元素
divmod(x,y) # 函数完成除法运算,返回商和余数。
round(x[,n]) # 函数返回浮点数x的四舍五入值,如给出n值,则代表舍入到小数点后的位数
strip() # 是去掉字符串两端多于空格,该句是去除序列中的所有字串两端多余的空格
del # 删除列表里面的数据
cmp(x,y) # 比较两个对象 #根据比较结果返回一个整数,如果x<y,则返回-1;如果x>y,则返回1,如果x==y则返回0
max() # 字符串中最大的字符
min() # 字符串中最小的字符
sorted() # 对序列排序
reversed() # 对序列倒序
enumerate() # 返回索引位置和对应的值
sum() # 总和
list() # 变成列表可用于迭代
eval('3+4') # 将字符串当表达式求值 得到7
exec 'a=100' # 将字符串按python语句执行
exec(a+'=new') # 将变量a的值作为新的变量
tuple() # 变成元组可用于迭代 #一旦初始化便不能更改的数据结构,速度比list快
zip(s,t) # 返回一个合并后的列表 s = ['11','22'] t = ['aa','bb'] [('11', 'aa'), ('22', 'bb')]
isinstance(object,int) # 测试对象类型 int
xrange([lower,]stop[,step]) # 函数与range()类似,但xrange()并不创建列表,而是返回一个xrange对象
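A brief illustration of a few of the built-ins listed above (works under Python 2 or 3):
print(divmod(17, 5))                           # (3, 2) -> quotient and remainder
print(list(zip(['11', '22'], ['aa', 'bb'])))   # [('11', 'aa'), ('22', 'bb')]
for i, v in enumerate(['tic', 'tac', 'toe']):  # index plus value
    print((i, v))
print(sorted([3, 1, 2]))                       # [1, 2, 3] -> new list, original untouched
print(list(reversed([3, 1, 2])))               # [2, 1, 3]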
列表类型内建函数
list.append(obj) # 向列表中添加一个对象obj
list.count(obj) # 返回一个对象obj在列表中出现的次数
list.extend(seq) # 把序列seq的内容添加到列表中
list.index(obj,i=0,j=len(list)) # 返回list[k] == obj 的k值,并且k的范围在i<=k<j;否则异常
list.insert(index.obj) # 在索引量为index的位置插入对象obj
list.pop(index=-1) # 删除并返回指定位置的对象,默认是最后一个对象
list.remove(obj) # 从列表中删除对象obj
list.reverse() # 原地翻转列表
list.sort(func=None,key=None,reverse=False) # 以指定的方式排序列表中成员,如果func和key参数指定,则按照指定的方式比较各个元素,如果reverse标志被置为True,则列表以反序排列
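A small example exercising several of the list methods above:
li = ['apple', 'mango', 'carrot']
li.append('banana')          # add to the end
li.insert(1, 'pear')         # insert at index 1
li.remove('carrot')          # remove by value
last = li.pop()              # remove and return the last element ('banana')
print(li.count('apple'))     # 1
print(li.index('mango'))     # 2
li.sort(reverse=True)        # in-place descending sort
li.reverse()                 # in-place reversal
print(li)                    # ['apple', 'mango', 'pear']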
序列类型操作符
seq[ind] # 获取下标为ind的元素
seq[ind1:ind2] # 获得下标从ind1到ind2的元素集合
seq * expr # 序列重复expr次
seq1 + seq2 # 连接seq1和seq2
obj in seq # 判断obj元素是否包含在seq中
obj not in seq # 判断obj元素是否不包含在seq中
字符串类型内建方法
string.expandtabs(tabsize=8) # tab符号转为空格 #默认8个空格
string.endswith(obj,beg=0,end=len(staring)) # 检测字符串是否已obj结束,如果是返回True #如果beg或end指定检测范围是否已obj结束
string.count(str,beg=0,end=len(string)) # 检测str在string里出现次数 f.count('\n',0,len(f)) 判断文件行数
string.find(str,beg=0,end=len(string)) # 检测str是否包含在string中
string.index(str,beg=0,end=len(string)) # 检测str不在string中,会报异常
string.isalnum() # 如果string至少有一个字符并且所有字符都是字母或数字则返回True
string.isalpha() # 如果string至少有一个字符并且所有字符都是字母则返回True
string.isnumeric() # 如果string只包含数字字符,则返回True
string.isspace() # 如果string包含空格则返回True
string.isupper() # 字符串都是大写返回True
string.islower() # 字符串都是小写返回True
string.lower() # 转换字符串中所有大写为小写
string.upper() # 转换字符串中所有小写为大写
string.lstrip() # 去掉string左边的空格
string.rstrip() # 去掉string字符末尾的空格
string.replace(str1,str2) # 把string中的str1替换成str2,如果num指定,则替换不超过num次
string.startswith(obj,beg=0,end=len(string)) # 检测字符串是否以obj开头
string.zfill(width) # 返回字符长度为width的字符,原字符串右对齐,前面填充0
string.isdigit() # 只包含数字返回True
string.split("/") # 把string切片成一个列表
":".join(string.split()) # 以:作为分隔符,将所有元素合并为一个新的字符串
字典内建方法
dict.clear() # 删除字典中所有元素
dict.copy() # 返回字典(浅复制)的一个副本
dict.fromkeys(seq,val=None) # 创建并返回一个新字典,以seq中的元素做该字典的键,val做该字典中所有键对的初始值
dict.get(key,default=None) # 对字典dict中的键key,返回它对应的值value,如果字典中不存在此键,则返回default值
dict.has_key(key) # 如果键在字典中存在,则返回True 用in和not in代替
dict.items() # 返回一个包含字典中键、值对元组的列表
dict.keys() # 返回一个包含字典中键的列表
dict.iteritems()/iterkeys()/itervalues() # 与对应的非迭代方法items()、keys()、values()一样,不同的是它们返回一个迭代子,而不是一个列表
dict.pop(key[,default]) # 和方法get()相似.如果字典中key键存在,删除并返回dict[key]
dict.setdefault(key,default=None) # 和get()相似,但如果字典中不存在key键,由dict[key]=default为它赋值
dict.update(dict2) # 将字典dict2的键值对添加到字典dict
dict.values() # 返回一个包含字典中所有值得列表
dict([container]) # 创建字典的工厂函数。提供容器类(container),就用其中的条目填充字典
len(mapping) # 返回映射的长度(键-值对的个数)
hash(obj) # 返回obj哈希值,判断某个对象是否可做一个字典的键值
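A short example of the dict methods above (avoids Python-2-only calls such as has_key so it runs under 2 or 3):
d = {'Swaroop': 'swaroop@example.com'}
d.setdefault('Larry', 'larry@wall.org')    # insert only if the key is missing
print(d.get('Tom', 'not found'))           # default instead of a KeyError
d.update({'Tom': 'tom@example.com'})       # merge another dict
for k, v in d.items():                     # iterate key/value pairs
    print('%s -> %s' % (k, v))
print(d.pop('Larry'))                      # remove and return the value
print(list(d.keys()))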
集合方法
s.update(t) # 用t中的元素修改s,s现在包含s或t的成员 s |= t
s.intersection_update(t) # s中的成员是共用属于s和t的元素 s &= t
s.difference_update(t) # s中的成员是属于s但不包含在t中的元素 s -= t
s.symmetric_difference_update(t) # s中的成员更新为那些包含在s或t中,但不是s和t共有的元素 s ^= t
s.add(obj) # 在集合s中添加对象obj
s.remove(obj) # 从集合s中删除对象obj;如果obj不是集合s中的元素(obj not in s),将引发KeyError错误
s.discard(obj) # 如果obj是集合s中的元素,从集合s中删除对象obj
s.pop() # 删除集合s中的任意一个对象,并返回它
s.clear() # 删除集合s中的所有元素
s.issubset(t) # 如果s是t的子集,则返回True s <= t
s.issuperset(t) # 如果t是s的超集,则返回True s >= t
s.union(t) # 合并操作;返回一个新集合,该集合是s和t的并集 s | t
s.intersection(t) # 交集操作;返回一个新集合,该集合是s和t的交集 s & t
s.difference(t) # 返回一个新集合,改集合是s的成员,但不是t的成员 s - t
s.symmetric_difference(t) # 返回一个新集合,该集合是s或t的成员,但不是s和t共有的成员 s ^ t
s.copy() # 返回一个新集合,它是集合s的浅复制
obj in s # 成员测试;obj是s中的元素 返回True
obj not in s # 非成员测试:obj不是s中元素 返回True
s == t # 等价测试 是否具有相同元素
s != t # 不等价测试
s < t # 子集测试;s!=t且s中所有元素都是t的成员
s > t # 超集测试;s!=t且t中所有元素都是s的成员
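A compact demonstration of the set operations listed above:
s = set([1, 2, 3])
t = set([3, 4, 5])
print(s | t)               # union
print(s & t)               # intersection
print(s - t)               # difference
print(s ^ t)               # symmetric difference
s.add(9)
s.discard(100)             # no error even if the element is absent
print(s.issubset(s | t))   # True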
序列化
#!/usr/bin/python
import cPickle
obj = {'1':['4124','1241','124'],'2':['12412','142','1241']}
pkl_file = open('account.pkl','wb')
cPickle.dump(obj,pkl_file)
pkl_file.close()
pkl_file = open('account.pkl','rb')
account_list = cPickle.load(pkl_file)
pkl_file.close()
文件对象方法
file.close() # 关闭文件
file.fileno() # 返回文件的描述符
file.flush() # 刷新文件的内部缓冲区
file.isatty() # 判断file是否是一个类tty设备
file.next() # 返回文件的下一行,或在没有其他行时引发StopIteration异常
file.read(size=-1) # 从文件读取size个字节,当未给定size或给定负值的时候,读取剩余的所有字节,然后作为字符串返回
file.readline(size=-1) # 从文件中读取并返回一行(包括行结束符),或返回最大size个字符
file.readlines(sizhint=0) # 读取文件的所有行作为一个列表返回
file.xreadlines() # 用于迭代,可替换readlines()的一个更高效的方法
file.seek(off, whence=0) # 在文件中移动文件指针,从whence(0代表文件起始,1代表当前位置,2代表文件末尾)偏移off字节
file.tell() # 返回当前在文件中的位置
file.truncate(size=file.tell()) # 截取文件到最大size字节,默认为当前文件位置
file.write(str) # 向文件写入字符串
file.writelines(seq) # 向文件写入字符串序列seq;seq应该是一个返回字符串的可迭代对象
文件对象的属性
file.closed # 表示文件已被关闭,否则为False
file.encoding # 文件所使用的编码 当unicode字符串被写入数据时,它将自动使用file.encoding转换为字节字符串;若file.encoding为None时使用系统默认编码
file.mode # Access文件打开时使用的访问模式
file.name # 文件名
file.newlines # 未读取到行分隔符时为None,只有一种行分隔符时为一个字符串,当文件有多种类型的行结束符时,则为一个包含所有当前所遇到的行结束符的列表
file.softspace # 为0表示在输出一数据后,要加上一个空格符,1表示不加
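A quick sketch tying several file-object methods and attributes together; demo.txt is a throwaway name used only for illustration:
f = open('demo.txt', 'w+')
f.write('first line\n')
f.writelines(['second line\n', 'third line\n'])
f.flush()
print(f.tell())        # current offset in the file
f.seek(0)              # rewind before reading
print(f.readline())    # 'first line'
print(f.name)          # 'demo.txt'
print(f.mode)          # 'w+'
f.close()
print(f.closed)        # True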
异常处理
# try 中使用 sys.exit(2) 会被捕获,无法退出脚本,可使用 os._exit(2) 退出脚本
class ShortInputException(Exception): # 继承Exception异常的类,定义自己的异常
def __init__(self, length, atleast):
Exception.__init__(self)
self.length = length
self.atleast = atleast
try:
s = raw_input('Enter something --> ')
if len(s) < 3:
raise ShortInputException(len(s), 3) # 触发异常
except EOFError:
print '\nWhy did you do an EOF on me?'
except ShortInputException, x: # 捕捉指定错误信息
print 'ShortInputException: %d | %d' % (x.length, x.atleast)
except Exception as err: # 捕捉所有其它错误信息内容
print str(err)
#except urllib2.HTTPError as err: # 捕捉外部导入模块的错误
#except: # 捕捉所有其它错误 不会看到错误内容
# print 'except'
finally: # 无论什么情况都会执行 关闭文件或断开连接等
print 'finally'
else: # 无任何异常 无法和finally同用
print 'No exception was raised.'
不可捕获的异常
NameError: # 尝试访问一个未申明的变量
ZeroDivisionError: # 除数为零
SyntaxErrot: # 解释器语法错误
IndexError: # 请求的索引元素超出序列范围
KeyError: # 请求一个不存在的字典关键字
IOError: # 输入/输出错误
AttributeError: # 尝试访问未知的对象属性
ImportError # 没有模块
IndentationError # 语法缩进错误
KeyboardInterrupt # ctrl+C
SyntaxError # 代码语法错误
ValueError # 值错误
TypeError # 传入对象类型与要求不符合
内建异常
BaseException # 所有异常的基类
SystemExit # python解释器请求退出
KeyboardInterrupt # 用户中断执行
Exception # 常规错误的基类
StopIteration # 迭代器没有更多的值
GeneratorExit # 生成器发生异常来通知退出
StandardError # 所有的内建标准异常的基类
ArithmeticError # 所有数值计算错误的基类
FloatingPointError # 浮点计算错误
OverflowError # 数值运算超出最大限制
AssertionError # 断言语句失败
AttributeError # 对象没有这个属性
EOFError # 没有内建输入,到达EOF标记
EnvironmentError # 操作系统错误的基类
IOError # 输入/输出操作失败
OSError # 操作系统错误
WindowsError # windows系统调用失败
ImportError # 导入模块/对象失败
KeyboardInterrupt # 用户中断执行(通常是ctrl+c)
LookupError # 无效数据查询的基类
IndexError # 序列中没有此索引(index)
KeyError # 映射中没有这个键
MemoryError # 内存溢出错误(对于python解释器不是致命的)
NameError # 未声明/初始化对象(没有属性)
UnboundLocalError # 访问未初始化的本地变量
ReferenceError # 若引用试图访问已经垃圾回收了的对象
RuntimeError # 一般的运行时错误
NotImplementedError # 尚未实现的方法
SyntaxError # python语法错误
IndentationError # 缩进错误
TabError # tab和空格混用
SystemError # 一般的解释器系统错误
TypeError # 对类型无效的操作
ValueError # 传入无效的参数
UnicodeError # Unicode相关的错误
UnicodeDecodeError # Unicode解码时的错误
UnicodeEncodeError # Unicode编码时的错误
UnicodeTranslateError # Unicode转换时错误
Warning # 警告的基类
DeprecationWarning # 关于被弃用的特征的警告
FutureWarning # 关于构造将来语义会有改变的警告
OverflowWarning # 旧的关于自动提升为长整形的警告
PendingDeprecationWarning # 关于特性将会被废弃的警告
RuntimeWarning # 可疑的运行时行为的警告
SyntaxWarning # 可疑的语法的警告
UserWarning # 用户代码生成的警告
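Two tiny examples of catching specific built-in exceptions from the list above (IOError is an alias of OSError on Python 3, so both snippets run on 2 and 3):
try:
    int('abc')
except ValueError as err:       # invalid literal for int()
    print('ValueError: %s' % err)
try:
    open('/no/such/file')
except IOError as err:          # missing file
    print('IOError: %s' % err)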
触发异常
raise exclass # 触发异常,从exclass生成一个实例(不含任何异常参数)
raise exclass() # 触发异常,但现在不是类;通过函数调用操作符(function calloperator:"()")作用于类名生成一个新的exclass实例,同样也没有异常参数
raise exclass, args # 触发异常,但同时提供的异常参数args,可以是一个参数也可以是元组
raise exclass(args) # 触发异常,同上
raise exclass, args, tb # 触发异常,但提供一个跟踪记录(traceback)对象tb供使用
raise exclass,instance # 通过实例触发异常(通常是exclass的实例)
raise instance # 通过实例触发异常;异常类型是实例的类型:等价于raise instance.__class__, instance
raise string # 触发字符串异常
raise string, srgs # 触发字符串异常,但触发伴随着args
raise string,args,tb # 触发字符串异常,但提供一个跟踪记录(traceback)对象tb供使用
raise # 重新触发前一个异常,如果之前没有异常,触发TypeError
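A minimal sketch of the most common raise form above, using an illustrative exception class:
class ShortInputException(Exception):
    pass
try:
    raise ShortInputException('input too short')   # raise exclass(args)
except ShortInputException as err:
    print('caught: %s' % err)
    # a bare `raise` here would re-raise the same exception unchanged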
跟踪异常栈
# traceback 获取异常相关数据都是通过sys.exc_info()函数得到的
import traceback
import sys
try:
s = raw_input()
print int(s)
except ValueError:
# sys.exc_info() 返回值是元组,第一个exc_type是异常的对象类型,exc_value是异常的值,exc_tb是一个traceback对象,对象中包含出错的行数、位置等数据
exc_type, exc_value, exc_tb = sys.exc_info()
print "\n%s \n %s \n %s\n" %(exc_type, exc_value, exc_tb )
traceback.print_exc() # 打印栈跟踪信息
抓取全部错误信息存如字典
import sys, traceback
try:
s = raw_input()
int(s)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback_details = {
'filename': exc_traceback.tb_frame.f_code.co_filename,
'lineno' : exc_traceback.tb_lineno,
'name' : exc_traceback.tb_frame.f_code.co_name,
'type' : exc_type.__name__,
'message' : exc_value.message,
}
del(exc_type, exc_value, exc_traceback)
print traceback_details
f = file('test1.txt', 'a')
f.write("%s %s %s %s %s\n" %(traceback_details['filename'],traceback_details['lineno'],traceback_details['name'],traceback_details['type'],traceback_details['message'], ))
f.flush()
f.close()
调试log
# cgitb覆盖了默认sys.excepthook全局异常拦截器
def func(a, b):
return a / b
if __name__ == '__main__':
import cgitb
cgitb.enable(format='text')
func(1, 0)
函数式编程的内建函数
apply(func[,nkw][,kw]) # 用可选的参数来调用func,nkw为非关键字参数,kw为关键字参数;返回值是函数调用的返回值
filter(func,seq) # 调用一个布尔函数func来迭代遍历每个seq中的元素;返回一个使func返回值为true的元素的序列
map(func,seq1[,seq2]) # 将函数func作用于给定序列(s)的每个元素,并用一个列表来提供返回值;如果func为None,func表现为一个身份函数,返回一个含有每个序列中元素集合的n个元组的列表
reduce(func,seq[,init]) # 将二元函数作用于seq序列的元素,每次携带一堆(先前的结果以及下一个序列元素),连续地将现有的结果和下一个值作用在获得的随后的结果上,最后减少我们的序列为一个单一的返回值;如果初始值init给定,第一个比较会是init和第一个序列元素而不是序列的头两个元素
lambda x,y:x+y # 创建一个匿名函数 可用于上面几种方法中直接创建匿名函数式
# filter 即通过函数方法只保留结果为真的值组成列表
def f(x): return x % 2 != 0 and x % 3 != 0
f(3) # 函数结果是False 3被filter抛弃
f(5) # 函数结果是True 5被加入filter最后的列表结果
filter(f, range(2, 25))
[5, 7, 11, 13, 17, 19, 23]
# map 通过函数对列表进行处理得到新的列表
def cube(x): return x*x*x
map(cube, range(1, 11))
[1, 8, 27, 64, 125, 216, 343, 512, 729, 1000]
# reduce 通过函数会先接收初始值和序列的第一个元素,然后是返回值和下一个元素,依此类推
def add(x,y): return x+y
reduce(add, range(1, 11)) # 结果55 是1到10的和 x的值是上一次函数返回的结果,y是列表中循环的值
reduce(lambda x,y:x+y, range(1,11)) # 等同上面两条 lambda来创建匿名函数[ lambda x,y:x+y ] ,后面跟可迭代的对象
编码转换
a='中文' # 编码未定义按输入终端utf8或gbk
u=u'中文' # 定义为unicode编码 u值为 u'\u4e2d\u6587'
u.encode('utf8') # 转为utf8格式 u值为 '\xe4\xb8\xad\xe6\x96\x87'
print u # 结果显示 中文
print u.encode('utf8') # 转为utf8格式,当显示终端编码为utf8 结果显示 中文 编码不一致则乱码
print u.encode('gbk') # 当前终端为utf8 故乱码
ord('4') # 字符转ASCII码
chr(52) # ASCII码转字符
设置读取编码为utf8 避免转换出错
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
遍历递归
[os.path.join(x[0],y) for x in os.walk('/root/python/5') for y in x[2]]
for i in os.walk('/root/python/5/work/server'):
print i
元类
# 实现动态curd类的或者实例中的方法属性
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Name: metaclass.py
# Author: ZhiPeng Wang.
# Created: 15/8/12
# Copyright: (c) TigerJoys-SA 2015
# -----------------------------------------------------------------------------
"""首先检查__metaclass__属性, 如果设置了此属性, 如果设置了此属性则调用对应Metaclass,
Metaclass本身也是Class 当调用时先调用自身的__new__方法新建一个Instance然后Instance调
用__init__返回一个新对象(MyClss), 然后正常执行原Class
"""
ext_attr = {
'wzp': 'wzp',
'test': 'test',
}
class CustomMeta(type):
build_in_attr = ['name', ]
def __new__(cls, class_name, bases, attributes):
# 获取`Meta` Instance
attr_meta = attributes.pop('Meta', None)
if attr_meta:
for attr in cls.build_in_attr: # 遍历内置属性
# 自省, 获取Meta Attributes 不是build_in_attr的属性不处理
print "Meta:", getattr(attr_meta, attr, False)
# 扩展属性
attributes.update(ext_attr)
return type.__new__(cls, class_name, bases, attributes)
def __init__(cls, class_name, bases, attributes):
super(CustomMeta, cls).__init__(class_name, bases, attributes)
class MyClass(object):
__metaclass__ = CustomMeta # metaclass
class Meta:
name = 'Meta attr'
if __name__ == '__main__':
# TODO 此处返回一个类`Instance`对象
print MyClass()
# TODO 此处返回一个类对象, 并不是`Instance`
print type("MyClass", (), {})
2 常用模块
sys [系统操作模块]
sys.argv # 取参数列表
sys.exit(2) # 退出脚本返回状态 会被try截取
sys.exc_info() # 获取当前正在处理的异常类
sys.version # 获取Python解释程序的版本信息
sys.maxint # 最大的Int值 9223372036854775807
sys.maxunicode # 最大的Unicode值
sys.modules # 返回系统导入的模块字段,key是模块名,value是模块
sys.path # 返回模块的搜索路径,初始化时使用PYTHONPATH环境变量的值
sys.platform # 返回操作系统平台名称
sys.stdout # 标准输出
sys.stdin # 标准输入
sys.stderr # 错误输出
sys.exec_prefix # 返回平台独立的python文件安装的位置
sys.stdin.readline() # 从标准输入读一行
sys.stdout.write("a") # 屏幕输出a
sys.path.insert(1, os.path.join(sys.path[0], '/opt/script/')) # 将/opt/script/目录加入环境变量,可导入相应模块
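A minimal script sketch using the sys attributes listed above (save as args.py and run with arguments; the file name is illustrative):
import sys
print(sys.argv)                      # script name plus any command-line arguments
print(sys.platform)                  # e.g. 'linux2' on Python 2, 'linux' on Python 3
sys.stdout.write('normal output\n')
sys.stderr.write('error output\n')
if len(sys.argv) < 2:
    sys.exit(2)                      # exit status visible to the shell via $?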
commands [执行系统操作]
(status, output) = commands.getstatusoutput('cat /proc/cpuinfo')
print status, output
os [系统模块]
# 相对sys模块 os模块更为底层 os._exit() try无法抓取
os.popen('id').read() # 执行系统命令得到返回结果
os.system() # 得到返回状态 返回无法截取
os.name # 返回系统平台 Linux/Unix用户是'posix'
os.getenv() # 读取环境变量
os.environ['A']='1' # 设置环境变量
os.getcwd() # 当前工作路径
os.chdir() # 改变当前工作目录
os.walk('/root/') # 递归路径
os.environ['HOME'] # 查看系统环境变量
os.statvfs("/") # 获取磁盘信息
文件处理
mkfifo()/mknod() # 创建命名管道/创建文件系统节点
remove()/unlink() # 删除文件
rename()/renames() # 重命名文件
stat() # 返回文件信息
symlink() # 创建符号链接
utime() # 更新时间戳
tmpfile() # 创建并打开('w+b')一个新的临时文件
walk() # 遍历目录树下的所有文件名
oct(os.stat('th1.py').st_mode)[-3:] # 查看目录权限
目录/文件夹
chdir()/fchdir() # 改变当前工作目录/通过一个文件描述符改变当前工作目录
chroot() # 改变当前进程的根目录
listdir() # 列出指定目录的文件
getcwd()/getcwdu() # 返回当前工作目录/功能相同,但返回一个unicode对象
mkdir()/makedirs() # 创建目录/创建多层目录
rmdir()/removedirs() # 删除目录/删除多层目录
访问/权限
access() # 检验权限模式
chmod('txt', 0777) # 改变权限模式
chown()/lchown() # 改变owner和groupID功能相同,但不会跟踪链接
umask() # 设置默认权限模式
文件描述符操作
open() # 底层的操作系统open(对于稳健,使用标准的内建open()函数)
read()/write() # 根据文件描述符读取/写入数据 按大小读取文件部分内容
dup()/dup2() # 复制文件描述符号/功能相同,但是复制到另一个文件描述符
设备号
makedev() # 从major和minor设备号创建一个原始设备号
major()/minor() # 从原始设备号获得major/minor设备号
os.path模块
os.path.expanduser('~/.ssh/key') # 家目录下文件的全路径
分隔
os.path.basename() # 去掉目录路径,返回文件名
os.path.dirname() # 去掉文件名,返回目录路径
os.path.join() # 将分离的各部分组合成一个路径名
os.path.split() # 返回(dirname(),basename())元组
os.path.splitdrive() # 返回(drivename,pathname)元组
os.path.splitext() # 返回(filename,extension)元组
信息
os.path.getatime() # 返回最近访问时间
os.path.getctime() # 返回文件创建时间
os.path.getmtime() # 返回最近文件修改时间
os.path.getsize() # 返回文件大小(字节)
查询
os.path.exists() # 指定路径(文件或目录)是否存在
os.path.isabs() # 指定路径是否为绝对路径
os.path.isdir() # 指定路径是否存在且为一个目录
os.path.isfile() # 指定路径是否存在且为一个文件
os.path.islink() # 指定路径是否存在且为一个符号链接
os.path.ismount() # 指定路径是否存在且为一个挂载点
os.path.samefile() # 两个路径名是否指向同一个文件
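A short example of the os.path helpers above; /etc/ssh/sshd_config is just an illustrative path:
import os
p = '/etc/ssh/sshd_config'
print(os.path.dirname(p))               # '/etc/ssh'
print(os.path.basename(p))              # 'sshd_config'
print(os.path.split(p))                 # ('/etc/ssh', 'sshd_config')
print(os.path.splitext('a/b.tar.gz'))   # ('a/b.tar', '.gz')
print(os.path.join('/tmp', 'a', 'b.txt'))
if os.path.isfile(p):
    print(os.path.getsize(p))           # size in bytes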
子进程
os.fork() # 创建子进程,并复制父进程所有操作 通过判断pid = os.fork() 的pid值,分别执行父进程与子进程操作,0为子进程
os.wait() # 等待子进程结束
跨平台os模块属性
linesep # 用于在文件中分隔行的字符串
sep # 用来分隔文件路径名字的字符串
pathsep # 用于分割文件路径的字符串
curdir # 当前工作目录的字符串名称
pardir # 父目录字符串名称
磁盘空间
import os
disk = os.statvfs("/")
# disk.f_bsize 块大小
# disk.f_blocks 块总数
# disk.f_bfree 剩余块总数
# disk.f_bavail 非root用户的剩余块数 由于权限小会比root的剩余块总数小 用这个做报警会更准确
# disk.f_files 总节点数
# disk.f_ffree 剩余节点数
# disk.f_favail 非root用户的剩余节点数
disk.f_bsize * disk.f_bavail / 1024 / 1024 / 1024 # 非root用户剩余空间大小G
disk.f_bsize * disk.f_blocks / 1024 / 1024 / 1024 # 分区空间总大小
commands [执行系统命令]
(status, output) = commands.getstatusoutput('cat /proc/cpuinfo')
print status, output
commands.getstatusoutput('id') # 返回元组(状态,标准输出)
commands.getoutput('id') # 只返回执行的结果, 忽略返回值
commands.getstatus('file') # 返回ls -ld file执行的结果
re [perl风格正则]
compile(pattern,flags=0) # 对正则表达式模式pattern进行编译,flags是可选标识符,并返回一个regex对象
match(pattern,string,flags=0) # 尝试用正则表达式模式pattern匹配字符串string,flags是可选标识符,如果匹配成功,则返回一个匹配对象;否则返回None
search(pattern,string,flags=0) # 在字符串string中搜索正则表达式模式pattern的第一次出现,flags是可选标识符,如果匹配成功,则返回一个匹配对象;否则返回None
findall(pattern,string[,flags]) # 在字符串string中搜索正则表达式模式pattern的所有(非重复)出现:返回一个匹配对象的列表 # pattern=u'\u4e2d\u6587' 代表UNICODE
finditer(pattern,string[,flags]) # 和findall()相同,但返回的不是列表而是迭代器;对于每个匹配,该迭代器返回一个匹配对象
split(pattern,string,max=0) # 根据正则表达式pattern中的分隔符把字符string分割为一个列表,返回成功匹配的列表,最多分割max次(默认所有)
sub(pattern,repl,string,max=0) # 把字符串string中所有匹配正则表达式pattern的地方替换成字符串repl,如果max的值没有给出,则对所有匹配的地方进行替换(subn()会返回一个表示替换次数的数值)
group(num=0) # 返回全部匹配对象(或指定编号是num的子组)
groups() # 返回一个包含全部匹配的子组的元组(如果没匹配成功,返回一个空元组)
零宽断言
str = 'aaa111aaa , bbb222&, 333ccc'
re.compile('\d+(?=[a-z]+)').findall(str) # 前向界定 (?=exp) 找出连续的数字并且最后一个数字跟着至少一个a-z ['111', '333']
re.compile(r"\d+(?![a-z]+)").findall(str) # 前向否定界定 (?!exp) 找出连续数字,且最后一个数字后不能跟a-z ['11', '222', '33']
re.compile(r"(?<=[a-z])\d+").findall(str) # 反向界定 (?<=exp) 逆序环视 找出连续的数字,且第一个数字前面是a-z ['111', '222']
re.compile(r"(?<![a-z])\d+").findall(str) # 反向否定界定 (?<!exp) 否定逆序环视 找出连续的数字,且第一个数字前不能是a-z ['11', '22', '333']
re.compile(r"(?:\d+)").findall(str) # 无捕获的匹配 (?:exp)
s= 'Tom:9527 , Sharry:0003 '
re.match( r'(?P<name>\w+):(?P<num>\d+)' , s).group(0) # 捕获组 <num>第二个标签变量[9527] 获取 group("num") 等同 group(2)[9527], group(0)全部[Tom:9527]
例子
re.findall(r'a[be]c','123abc456eaec789') # 返回匹配对象列表 ['abc', 'aec']
re.findall("(.)12[34](..)",a) # 取出匹配括号中内容 a='qedqwe123dsf'
re.search("(.)123",a ).group(1) # 搜索匹配的取第1个标签
re.match("^(1|2) *(.*) *abc$", str).group(2) # 取第二个标签
re.match("^(1|2) *(.*) *abc$", str).groups() # 取所有标签
re.sub('[abc]','A','alex') # 替换
for i in re.finditer(r'\d+',s): # 迭代
print i.group(),i.span() #
搜索网页中UNICODE格式的中文
QueryAdd='http://www.anti-spam.org.cn/Rbl/Query/Result'
Ip='222.129.184.52'
s = requests.post(url=QueryAdd, data={'IP':Ip})
re.findall(u'\u4e2d\u56fd', s.text, re.S)
csv [访问csv逗号分隔的文件]
csv读配置文件
192.168.1.5,web # 配置文件按逗号分割
list = csv.reader(file('a.csv'))
for line in list:
print line # ['192.168.1.5', 'web']
csv配合with读文件
import csv
with open('some.csv', 'rb') as f:
reader = csv.reader(f)
for row in reader:
print row
csv配合with写文件
import csv
with open('some.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerow(['Column1', 'Column2', 'Column3']) # 写单行 列表
writer.writerows([range(3) for i in range(5)]) # 写多行 列表套列表
shutil [提供高级文件访问功能]
import shutil
shutil.copyfile('data.db', 'archive.db') # 拷贝文件
shutil.move('/build/executables', 'installdir') # 移动文件或目录
dircache [目录文件列表缓存]
import dircache
a = dircache.listdir('/data/xuesong') # 列出目录下所有的文件和目录
dircache.annotate('/data/xuesong', a) # 判断指定目录下的是文件还是目录,目录则后面加/ 文件或不存在则不改变
glob [文件通配符]
import glob
glob.glob('*.py') # 查找当前目录下py结尾的文件
random [随机模块]
import random
random.choice(['apple', 'pear', 'banana']) # 随机取列表一个参数
random.sample(xrange(100), 10) # 不重复抽取10个
random.random() # 随机浮点数
random.randrange(6) # 随机整数范围
tempfile [创建临时文件]
import os
import tempfile
temp = tempfile.TemporaryFile() # 定义一个临时文件对象
try:
temp.write('Some data') # 写入数据
temp.writelines(['first\n', 'second\n']) # 写入多行
temp.seek(0) # 回到文件开头再读取
print temp.read() # 读取
for line in temp: # 循环读取每一行
print line.rstrip()
finally:
temp.close() # 关闭后删除临时文件
# 创建临时目录
import os
import tempfile
directory_name = tempfile.mkdtemp()
print directory_name # 打印临时目录地址 /var/folders...
# Clean up the directory yourself
os.removedirs(directory_name) # 创建临时目录需要手动删除
# 控制临时文件名
import tempfile
temp = tempfile.NamedTemporaryFile(suffix='_suffix', prefix='prefix_', dir='/tmp')
try:
print 'temp:', temp
print 'temp.name:', temp.name
finally:
temp.close()
email [发送邮件]
发送邮件内容
#!/usr/bin/python
#encoding:utf8
# 导入 smtplib 和 MIMEText
import smtplib
from email.mime.text import MIMEText
# 定义发送列表
mailto_list=["272121935@qq.com","272121935@163.com"]
# 设置服务器名称、用户名、密码以及邮件后缀
mail_host = "smtp.163.com"
mail_user = "mailuser"
mail_pass = "password"
mail_postfix="163.com"
# 发送邮件函数
def send_mail(to_list, sub):
me = mail_user + "<"+mail_user+"@"+mail_postfix+">"
fp = open('context.txt')
msg = MIMEText(fp.read(),_charset="utf-8")
fp.close()
msg['Subject'] = sub
msg['From'] = me
msg['To'] = ";".join(to_list)
try:
send_smtp = smtplib.SMTP()
send_smtp.connect(mail_host)
send_smtp.login(mail_user+"@"+mail_postfix, mail_pass)
send_smtp.sendmail(me, to_list, msg.as_string())
send_smtp.close()
return True
except Exception, e:
print str(e)
return False
if send_mail(mailto_list,"标题"):
print "测试成功"
else:
print "测试失败"
发送附件
#!/usr/bin/python
#encoding:utf8
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
def send_mail(to_list, sub, filename):
me = mail_user + "<"+mail_user+"@"+mail_postfix+">"
msg = MIMEMultipart()
msg['Subject'] = sub
msg['From'] = me
msg['To'] = ";".join(to_list)
submsg = MIMEBase('application', 'x-xz')
submsg.set_payload(open(filename,'rb').read())
encoders.encode_base64(submsg)
submsg.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(submsg)
try:
send_smtp = smtplib.SMTP()
send_smtp.connect(mail_host)
send_smtp.login(mail_user, mail_pass)
send_smtp.sendmail(me, to_list, msg.as_string())
send_smtp.close()
return True
except Exception, e:
print str(e)
return False
# 设置服务器名称、用户名、密码以及邮件后缀
mail_host = "smtp.163.com"
mail_user = "xuesong"
mail_pass = "mailpasswd"
mail_postfix = "163.com"
mailto_list = ["272121935@qq.com","quanzhou722@163.com"]
title = 'check'
filename = 'file_check.html'
if send_mail(mailto_list,title,filename):
print "发送成功"
else:
print "发送失败"
gzip [解压缩gzip 删除原文件]
#压缩gzip
import gzip
f_in = open('file.log', 'rb')
f_out = gzip.open('file.log.gz', 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
#压缩gzip
File = 'xuesong_18.log'
g = gzip.GzipFile(filename="", mode='wb', compresslevel=9, fileobj=open((r'%s.gz' %File),'wb'))
g.write(open(r'%s' %File).read())
g.close()
#解压gzip
g = gzip.GzipFile(mode='rb', fileobj=open((r'xuesong_18.log.gz'),'rb'))
open((r'xuesong_18.log'),'wb').write(g.read())
tarfile [归档压缩tar.gz 保留原文件]
# 压缩tar.gz
import os
import tarfile
tar = tarfile.open("/tmp/tartest.tar.gz","w:gz") # 创建压缩包名
for path,dir,files in os.walk("/tmp/tartest"): # 递归文件目录
for file in files:
fullpath = os.path.join(path,file)
tar.add(fullpath) # 创建压缩包
tar.close()
# 解压tar.gz
import tarfile
tar = tarfile.open("/tmp/tartest.tar.gz")
#tar.extractall("/tmp") # 全部解压到指定路径
names = tar.getnames() # 包内文件名
for name in names:
tar.extract(name,path="./") # 解压指定文件
tar.close()
zipfile [解压缩zip 最大2G]
# 压缩zip
import zipfile,os
f = zipfile.ZipFile('filename.zip', 'w' ,zipfile.ZIP_DEFLATED) # ZIP_STORE 为默认表不压缩. ZIP_DEFLATED 表压缩
#f.write('file1.txt') # 将文件写入压缩包
for path,dir,files in os.walk("tartest"): # 递归压缩目录
for file in files:
f.write(os.path.join(path,file)) # 将文件逐个写入压缩包
f.close()
# 解压zip
if zipfile.is_zipfile('filename.zip'): # 判断一个文件是不是zip文件
f = zipfile.ZipFile('filename.zip')
for file in f.namelist(): # 返回文件列表
f.extract(file, r'/tmp/') # 解压指定文件
#f.extractall() # 解压全部
f.close()
time/datetime [时间]
import time
time.strftime('%Y%m%d_%H%M') # 格式化时间
time.time() # 时间戳[浮点]
int(time.time()) # 时间戳[整s]
time.localtime()[1] - 1 # 上个月
time.strftime('%Y-%m-%d_%X',time.localtime( time.time() ) ) # 时间戳转日期
time.mktime(time.strptime('2012-03-28 06:53:40', '%Y-%m-%d %H:%M:%S')) # 日期转时间戳
最近的周五
from datetime import datetime
from dateutil.relativedelta import relativedelta, FR
(datetime.now() + relativedelta(weekday=FR(-1))).strftime('%Y%m%d')
获取本周一
import datetime
datetime.date.today() - datetime.timedelta(days=datetime.date.today().weekday())
判断输入时间格式是否正确
#encoding:utf8
import time
while 1:
atime=raw_input('输入格式如[14.05.13 13:00]:')
try:
btime=time.mktime(time.strptime('%s:00' %atime, '%y.%m.%d %H:%M:%S'))
break
except:
print '时间输入错误,请重新输入,格式如[14.05.13 13:00]'
上一个月最后一天
import datetime
lastMonth=datetime.date(datetime.date.today().year,datetime.date.today().month,1)-datetime.timedelta(1)
lastMonth.strftime("%Y/%m")
前一天
(datetime.datetime.now() + datetime.timedelta(days=-1) ).strftime('%Y%m%d')
两日期相差天数
import datetime
d1 = datetime.datetime(2005, 2, 16)
d2 = datetime.datetime(2004, 12, 31)
(d1 - d2).days
向后加10个小时
import datetime
d1 = datetime.datetime.now()
d3 = d1 + datetime.timedelta(hours=10)
d3.ctime()
optparse [解析参数及标准提示]
import os, sys
import time
import optparse
# python aaa.py -t file -p /etc/opt -o aaaaa
def do_fiotest( type, path, output,):
print type, path, output,
def main():
parser = optparse.OptionParser()
parser.add_option('-t', '--type', dest = 'type', default = None, help = 'test type[file, device]')
parser.add_option('-p', '--path', dest = 'path', default = None, help = 'test file path or device path')
parser.add_option('-o', '--output', dest = 'output', default = None, help = 'result dir path')
(o, a) = parser.parse_args()
if None == o.type or None == o.path or None == o.output:
print "No device or file or output dir"
return -1
if 'file' != o.type and 'device' != o.type:
print "You need specify test type ['file' or 'device']"
return -1
do_fiotest(o.type, o.path, o.output)
print "Test done!"
if __name__ == '__main__':
main()
getopt [解析参数]
import sys,os
import getopt
try:
options,argsErr = getopt.getopt(sys.argv[1:],"hu:c:",["help","user=","cmd="]) # 中间短参数,后面长参数对应. 不带:或=代表不带参数
except getopt.GetoptError:
print "Unknown parameters,More info with: %s -h" %(sys.argv[0])
sys.exit(2)
if argsErr != []:
print "Unknown parameters,More info with: %s -h" %(sys.argv[0])
sys.exit(2)
for o,a in options:
if o in ("-h","--help"):
print '''Usage: python te.py -u user -c "cmd -options" '''
sys.exit(2)
if o in ("-u","--user"):
user = a
if o in ("-c","--cmd"):
cmd = a
print user,cmd
argparse [命令行选项和参数解析库]
import argparse
parser = argparse.ArgumentParser( prog='usage_name', description='开头打印', epilog="结束打印")
parser.add_argument('-f', '--foo', help='foo help', action='append') # 可选参数,如使用此参数必须传值 action='store_true' 不加参数为True action='append' 多个参数可叠加为列表
parser.add_argument('--aa', type=int, default=42, help='aa!') # type规定参数类型,default设置默认值
parser.add_argument('bar', nargs='*', default=[1, 2, 3], help='BAR!') # 位置参数 必须传递 nargs=2 需要传递2个参数
parser.add_argument('args', nargs=argparse.REMAINDER) # 剩余参数收集到列表
parser.print_help() # 打印使用帮助
#parser.parse_args('BAR --foo FOO'.split()) # 设置位置参数
args = parser.parse_args() # 全部的值
parser.get_default('foo') # 获取
python a.py --foo ww --aa 40 xuesong 27 # 执行此脚本
subprocess [子进程管理]
import subprocess
s=subprocess.Popen('sleep 20', shell=True, \
stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
print s.wait() # 阻塞等待子进程完成并返回状态码 shell 0为正确 但管道内容过多会造成死锁可以用 communicate()
print s.stdout.read()
print s.stderr.read()
print s.communicate() # 返回元组 (stdout, stderr) 会阻塞等待进程完成 推荐使用
print s.returncode # 返回执行状态码
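A minimal sketch of the recommended pattern above: call communicate() to drain both pipes (avoiding the wait() deadlock), then check returncode. The 'ls /tmp' command is just a placeholder.
import subprocess

p = subprocess.Popen('ls /tmp', shell=True,
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()   # waits for the child and drains stdout/stderr, so no pipe deadlock
print p.returncode           # 0 on success
print out, err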
base64 [编码]
# 简单但容易被直接破解
import base64
s1 = base64.encodestring('hello world')
s2 = base64.decodestring(s1)
uu [对文件uu编码]
import uu
uu.encode('in_file','out_file') # 编码
uu.decode('in_file','out_file') # 解码
binascii [ascii和二进制编码转换]
md5 [单向MD5加密]
import md5
m = md5.new('123456').hexdigest()
hashlib [hash算法库]
import hashlib
m = hashlib.md5()
m.update("Nobody inspects") # 使用update方法对字符串md5加密
m.digest() # 加密后二进制结果
m.hexdigest() # 加密后十进制结果
hashlib.new("md5", "string").hexdigest() # 对字符串加密
hashlib.new("md5", open("file").read()).hexdigest() # 查看文件MD5值
hashlib.sha224("Nobody inspects the spammish repetition").hexdigest() # 几种hash算法 sha1 sha224 sha256 sha384 sha512
crypt [单向加密]
import crypt
import random,string
def getsalt(chars = string.letters+string.digits):
return random.choice(chars)+random.choice(chars)
salt = getsalt()
print salt
print crypt.crypt('bananas',salt)
pycrypto [加密]
# https://github.com/dlitz/pycrypto
SHA256 # 不可逆散列算法加密
from Crypto.Hash import SHA256
hash = SHA256.new()
hash.update('message')
hash.digest()
AES # 可逆加密,需要密钥
from Crypto.Cipher import AES
obj = AES.new('This is a key123', AES.MODE_CBC, 'This is an IV456')
message = "The answer is no"
ciphertext = obj.encrypt(message)
ciphertext
'\xd6\x83\x8dd!VT\x92\xaa`A\x05\xe0\x9b\x8b\xf1'
obj2 = AES.new('This is a key123', AES.MODE_CBC, 'This is an IV456')
obj2.decrypt(ciphertext)
'The answer is no'
rsa [公钥加密算法]
http://www.heikkitoivonen.net/m2crypto/api/M2Crypto.RSA.RSA-class.html
pip install M2Crypto
from M2Crypto import RSA,BIO # help(RSA)
rsa = RSA.gen_key(2048, 'sha1') # 设置生成密钥为2048位,1024较不安全,默认算法sha1
rsa.save_key('rsa.priv.pem', None ) # 生成私钥pem文件
rsa.save_pub_key('rsa.pub.pem') # 生成公钥pem文件
rsa.save_key_bio() # 私钥保存到pem格式的M2Crypto.BIO.BIO对象
rsa.save_pub_key_bio() # 公钥保存到pem格式的M2Crypto.BIO.BIO对象
priv=RSA.load_key('rsa.priv.pem') # 加载私钥文件
pub=RSA.load_pub_key('rsa.pub.pem') # 加载公钥文件
rsa.check_key() # 检查key是否初始化
pub_key.public_encrypt('data',RSA.pkcs1_padding) # 公钥加密
priv_key.private_decrypt('密文',RSA.pkcs1_padding) # 私钥解密
from M2Crypto import RSA,BIO
rsa = RSA.gen_key(2048, 3, lambda *args: None)
pub_bio = BIO.MemoryBuffer()
priv_bio = BIO.MemoryBuffer()
rsa.save_pub_key_bio(pub_bio)
rsa.save_key_bio(priv_bio, None)
# print pub_bio.read_all()
pub_key = RSA.load_pub_key_bio(pub_bio)
priv_key = RSA.load_key_bio(priv_bio)
message = 'i am luchanghong'
encrypted = pub_key.public_encrypt(message, RSA.pkcs1_padding) # 加密
decrypted = priv_key.private_decrypt(encrypted, RSA.pkcs1_padding) # 解密
print decrypted
getpass [隐藏输入密码]
import getpass
passwd=getpass.getpass()
string [字符串类]
import string
string.ascii_letters # a-zA-Z ascii的不受语言系统环境变化
string.ascii_lowercase # a-z
string.letters # a-zA-Z 受系统语言环境变化影响
string.lowercase # a-z
string.uppercase # A-Z大小
string.digits # 0-9
string.printable # 所有可打印字符
string.whitespace # 空白字符
Gittle [python的git库]
pip install gittle
from gittle import Gittle
repo_path = '/tmp/gittle_bare'
repo_url = 'git://github.com/FriendCode/gittle.git'
repo = Gittle.clone(repo_url, repo_path)
auth = GittleAuth(pkey=key) # 认证
Gittle.clone(repo_url, repo_path, auth=auth)
repo = Gittle.clone(repo_url, repo_path, bare=True) # 克隆仓库没有目录的
repo = Gittle.init(path) # 初始化
repo.commits # 获取提交列表
repo.branches # 获取分支列表
repo.modified_files # 被修改的文件列表
repo.diff('HEAD', 'HEAD~1') # 获取最新提交差异
repo.stage('file.txt') # 提交文件
repo.stage(['other1.txt', 'other2.txt']) # 提交文件列表
repo.commit(name="Samy Pesse", email="samy@friendco.de", message="This is a commit") # 更新信息
repo = Gittle(repo_path, origin_uri=repo_url)
key_file = open('/Users/Me/keys/rsa/private_rsa')
repo.auth(pkey=key_file)
repo.push() # 远端push提交操作
repo = Gittle(repo_path, origin_uri=repo_url)
key_file = open('/Users/Me/keys/rsa/private_rsa')
repo.auth(pkey=key_file)
repo.pull() # 拉取最新分支
repo.create_branch('dev', 'master') # 创建分支
repo.switch_branch('dev') # 切换到分支
repo.create_orphan_branch('NewBranchName') # 创建一个空的分支
repo.remove_branch('dev') # 删除分支
paramiko [ssh客户端]
安装
sudo apt-get install python-setuptools
easy_install
sudo apt-get install python-all-dev
sudo apt-get install build-essential
paramiko实例(账号密码登录执行命令)
#!/usr/bin/python
#ssh
import paramiko
import sys,os
host = '10.152.15.200'
user = 'peterli'
password = '123456'
s = paramiko.SSHClient() # 绑定实例
s.load_system_host_keys() # 加载本地HOST主机文件
s.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # 允许连接不在know_hosts文件中的主机
s.connect(host,22,user,password,timeout=5) # 连接远程主机
while True:
cmd=raw_input('cmd:')
stdin,stdout,stderr = s.exec_command(cmd) # 执行命令
cmd_result = stdout.read(),stderr.read() # 读取命令结果
for line in cmd_result:
print line,
s.close()
paramiko实例(传送文件)
#!/usr/bin/evn python
import os
import paramiko
host='127.0.0.1'
port=22
username = 'peterli'
password = '123456'
ssh=paramiko.Transport((host,port))
privatekeyfile = os.path.expanduser('~/.ssh/id_rsa')
mykey = paramiko.RSAKey.from_private_key_file( os.path.expanduser('~/.ssh/id_rsa')) # 加载key 不使用key可不加
ssh.connect(username=username,password=password) # 连接远程主机
# 使用key把 password=password 换成 pkey=mykey
sftp=paramiko.SFTPClient.from_transport(ssh) # SFTP使用Transport通道
sftp.get('/etc/passwd','pwd1') # 下载 两端都要指定文件名
sftp.put('pwd','/tmp/pwd') # 上传
sftp.close()
ssh.close()
paramiko实例(密钥执行命令)
#!/usr/bin/python
#ssh
import paramiko
import sys,os
host = '10.152.15.123'
user = 'peterli'
s = paramiko.SSHClient()
s.load_system_host_keys()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
privatekeyfile = os.path.expanduser('~/.ssh/id_rsa') # 定义key路径
mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
# mykey=paramiko.DSSKey.from_private_key_file(privatekeyfile,password='061128') # DSSKey方式 password是key的密码
s.connect(host,22,user,pkey=mykey,timeout=5)
cmd=raw_input('cmd:')
stdin,stdout,stderr = s.exec_command(cmd)
cmd_result = stdout.read(),stderr.read()
for line in cmd_result:
print line,
s.close()
ssh并发(Pool控制最大并发)
#!/usr/bin/env python
#encoding:utf8
#ssh_concurrent.py
import multiprocessing
import sys,os,time
import paramiko
def ssh_cmd(host,port,user,passwd,cmd):
msg = "-----------Result:%s----------" % host
s = paramiko.SSHClient()
s.load_system_host_keys()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
s.connect(host,22,user,passwd,timeout=5)
stdin,stdout,stderr = s.exec_command(cmd)
cmd_result = stdout.read(),stderr.read()
print msg
for line in cmd_result:
print line,
s.close()
except paramiko.AuthenticationException:
print msg
print 'AuthenticationException Failed'
except paramiko.BadHostKeyException:
print msg
print "Bad host key"
result = []
p = multiprocessing.Pool(processes=20)
cmd=raw_input('CMD:')
f=open('serverlist.conf')
list = f.readlines()
f.close()
for IP in list:
print IP
host=IP.split()[0]
port=int(IP.split()[1])
user=IP.split()[2]
passwd=IP.split()[3]
result.append(p.apply_async(ssh_cmd,(host,port,user,passwd,cmd)))
p.close()
for res in result:
res.get(timeout=35)
ssh并发(取文件状态并发送邮件)
#!/usr/bin/python
#encoding:utf8
#config file: ip.list
import paramiko
import multiprocessing
import smtplib
import sys,os,time,datetime,socket,re
from email.mime.text import MIMEText
# 配置文件(IP列表)
Conf = 'ip.list'
user_name = 'peterli'
user_pwd = 'passwd'
port = 22
PATH = '/home/peterli/'
# 设置服务器名称、用户名、密码以及邮件后缀
mail_host = "smtp.163.com"
mail_user = "xuesong"
mail_pass = "mailpasswd"
mail_postfix = "163.com"
mailto_list = ["272121935@qq.com","quanzhou722@163.com"]
title = 'file check'
DATE1=(datetime.datetime.now() + datetime.timedelta(days=-1) ).strftime('%Y%m%d')
file_path = '%s%s' %(PATH,DATE1)
def Ssh_Cmd(file_path,host_ip,user_name,user_pwd,port=22):
s = paramiko.SSHClient()
s.load_system_host_keys()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
s.connect(hostname=host_ip,port=port,username=user_name,password=user_pwd)
stdin,stdout,stderr = s.exec_command('stat %s' %file_path)
stat_result = '%s%s' %(stdout.read(),stderr.read())
if stat_result.find('No such file or directory') == -1:
file_status = 'OK\t'
stdin,stdout,stderr = s.exec_command('du -sh %s' %file_path)
cmd1_result = '%s_%s' %(stat_result.split()[32],stat_result.split()[33].split('.')[0])
cmd2_result = ('%s%s' %(stdout.read(),stderr.read())).split()[0]
else:
file_status = '未生成\t'
cmd1_result = 'null'
cmd2_result = 'null'
q.put(['Login successful'])
s.close()
except socket.error:
file_status = '主机或端口错误'
cmd1_result = '-'
cmd2_result = '-'
except paramiko.AuthenticationException:
file_status = '用户或密码错误'
cmd1_result = '-'
cmd2_result = '-'
except paramiko.BadHostKeyException:
file_status = 'Bad host key'
cmd1_result = '-'
cmd2_result = '-'
except:
file_status = 'ssh异常'
cmd1_result = '-'
cmd2_result = '-'
r.put('%s\t-\t%s\t%s\t%s\t%s\n' %(time.strftime('%Y-%m-%d_%H:%M'),host_ip,file_status,cmd2_result,cmd1_result))
def Concurrent(Conf,file_path,user_name,user_pwd,port):
# 执行总计
total = 0
# 读取配置文件
f=open(Conf)
list = f.readlines()
f.close()
# 并发执行
process_list = []
log_file = file('file_check.log', 'w')
log_file.write('检查时间\t\t业务\tIP\t\t文件状态\t大小\t生成时间\n')
for host_info in list:
# 判断配置文件中注释行跳过
if host_info.startswith('#'):
continue
# 取变量,其中任意变量未取到就跳过执行
try:
host_ip=host_info.split()[0].strip()
#user_name=host_info.split()[1]
#user_pwd=host_info.split()[2]
except:
log_file.write('Profile error: %s\n' %(host_info))
continue
#try:
# port=int(host_info.split()[3])
#except:
# port=22
total +=1
p = multiprocessing.Process(target=Ssh_Cmd,args=(file_path,host_ip,user_name,user_pwd,port))
p.start()
process_list.append(p)
for j in process_list:
j.join()
for j in process_list:
log_file.write(r.get())
successful = q.qsize()
log_file.write('执行完毕。 总执行:%s 登录成功:%s 登录失败:%s\n' %(total,successful,total - successful))
log_file.flush()
log_file.close()
def send_mail(to_list, sub):
me = mail_user + "<"+mail_user+"@"+mail_postfix+">"
fp = open('file_check.log')
msg = MIMEText(fp.read(),_charset="utf-8")
fp.close()
msg['Subject'] = sub
msg['From'] = me
msg['To'] = ";".join(to_list)
try:
send_smtp = smtplib.SMTP()
send_smtp.connect(mail_host)
send_smtp.login(mail_user, mail_pass)
send_smtp.sendmail(me, to_list, msg.as_string())
send_smtp.close()
return True
except Exception, e:
print str(e)
return False
if __name__ == '__main__':
q = multiprocessing.Queue()
r = multiprocessing.Queue()
Concurrent(Conf,file_path,user_name,user_pwd,port)
if send_mail(mailto_list,title):
print "发送成功"
else:
print "发送失败"
pysnmp [snmp客户端]
#!/usr/bin/python
from pysnmp.entity.rfc3413.oneliner import cmdgen
cg = cmdgen.CommandGenerator()
# 注意IP 端口 组默认public oid值
varBinds = cg.getCmd( cmdgen.CommunityData('any-agent', 'public',0 ), cmdgen.UdpTransportTarget(('10.10.76.42', 161)), (1,3,6,1,4,1,2021,10,1,3,1), )
print varBinds[3][0][1]
PDB [单步调试]
# 很多程序因为被try了,看不到具体报错的地方, 用这个模块就很清晰可以看到错误的位置
# http://docs.python.org/2/library/pdb.html
(Pdb) h # 帮助
# 断点设置
(Pdb)b 10 # 断点设置在本py的第10行
(Pdb)b ots.py:20 # 断点设置到 ots.py第20行
(Pdb)b # 查看断点编号
(Pdb)cl 2 # 删除第2个断点
# 运行
(Pdb)n # 单步运行
(Pdb)s # 单步进入,会进入函数/方法内部执行
(Pdb)c # 跳到下个断点
# 查看
(Pdb)p param # 查看当前 变量值
(Pdb)l # 查看运行到某处代码
(Pdb)a # 查看全部栈内变量
!a = 100 # 直接赋值
python -m pdb myscript.py # 直接对脚本单步调试
# 在程序里面加单步调试
import pdb
def tt():
pdb.set_trace()
for i in range(1, 5):
print i
>>> tt()
> <stdin>(3)tt()
(Pdb) n #这里支持 n p c 而已
pstats [源码性能分析测试]
import profile
import pstats
profile.run("run()", "prof.txt")
p = pstats.Stats("prof.txt")
p.sort_stats("time").print_stats()
apscheduler [任务调度]
# 安装 pip install apscheduler
# 例子 https://bitbucket.org/agronholm/apscheduler/src/e6298f953a68/tests/?at=master
scheduler.start() # 启动任务
job = scheduler.add_job(myfunc, 'interval', minutes=2) # 添加任务
job.remove() # 删除任务
scheduler.add_job(myfunc, 'interval', minutes=2, id='my_job_id') # 添加任务
scheduler.remove_job('my_job_id') # 删除任务
job.modify(max_instances=6, name='Alternate name') # 修改工作
scheduler.shutdown() # 关闭调度
scheduler.shutdown(wait=False) # 关闭调度 不等待
# 暂停
apscheduler.job.Job.pause()
apscheduler.schedulers.base.BaseScheduler.pause_job()
# 恢复
apscheduler.job.Job.resume()
apscheduler.schedulers.base.BaseScheduler.resume_job()
定时任务
from pytz import utc
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
import time
executors = {
'default': ThreadPoolExecutor(20),
'processpool': ProcessPoolExecutor(5)
}
job_defaults = {
'coalesce': False,
'max_instances': 3
}
scheduler = BackgroundScheduler( executors=executors, job_defaults=job_defaults, timezone=utc)
def myfunc():
print 'test'
scheduler.add_job(myfunc, 'interval', minutes=1, id='myworkid')
scheduler.start()
try:
while True:
time.sleep(2)
# add_job
except (KeyboardInterrupt, SystemExit):
scheduler.shutdown()
logging [日志记录]
# 日志级别大小关系为: critical > error > warning > info > debug > notset 也可自定义日志级别
import logging
logging.debug('debug') # 默认日志级别为 warning ,故debug日志不做打印
logging.warning('warning') # 达到默认日志级别为WARNING,打印到屏幕 warning
logging.basicConfig # 通过logging.basicConfig函数对日志的输出格式及方式做相关配置
# basicConfig 相关参数帮助
filename # 指定日志文件名
filemode # 和file函数意义相同,指定日志文件的打开模式,'w'或'a'
datefmt # 指定时间格式,同time.strftime()
level # 设置日志级别,默认为logging.WARNING
stream # 指定将日志的输出流,可以指定输出到sys.stderr,sys.stdout或者文件,默认输出到sys.stderr,当stream和filename同时指定时,stream被忽略
format # 指定输出的格式和内容,format可以输出很多有用信息,如上例所示:
%(levelno)s # 打印日志级别的数值
%(levelname)s # 打印日志级别名称
%(pathname)s # 打印当前执行程序的路径,其实就是sys.argv[0]
%(filename)s # 打印当前执行程序名
%(funcName)s # 打印日志的当前函数
%(lineno)d # 打印日志的当前行号
%(asctime)s # 打印日志的时间
%(thread)d # 打印线程ID
%(threadName)s # 打印线程名称
%(process)d # 打印进程ID
%(message)s # 打印日志信息
logging.config.fileConfig("logger.conf") # 加载配置文件
logger = logging.getLogger("example02") # 使用已定义的日志记录器
logger.conf # 配置文件
###############################################
[loggers]
keys=root,example01,example02 # 设置三种日志记录器
[logger_root] # 针对单一种设置
level=DEBUG
handlers=hand01,hand02
[logger_example01]
handlers=hand01,hand02 # 使用2种处理方式,按不同级别分别输出
qualname=example01
propagate=0
[logger_example02]
handlers=hand01,hand03
qualname=example02
propagate=0
###############################################
[handlers] # 不同的处理方式
keys=hand01,hand02,hand03 # 三种方式的名字
[handler_hand01] # 第一种方式配置
class=StreamHandler # 发送错误信息到流
level=INFO # 日志级别
formatter=form02 # 日志的格式方式
args=(sys.stderr,)
[handler_hand02]
class=FileHandler # FileHandler写入磁盘文件
level=DEBUG
formatter=form01
args=('myapp.log', 'a') # 追加到日志文件
[handler_hand03]
class=handlers.RotatingFileHandler
level=INFO
formatter=form02
args=('myapp.log', 'a', 10*1024*1024, 5) # 追加日志并切割日志
###############################################
[formatters] # 针对不同处理日志方式设置具体的日志格式
keys=form01,form02
[formatter_form01]
format=%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s # 日志列
datefmt=%a, %d %b %Y %H:%M:%S # 时间格式
[formatter_form02]
format=%(name)-12s: %(levelname)-8s %(message)s
datefmt=
通用日志记录
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename='/var/log/myapp.log',
filemode='a')
# 日志级别DEBUG或高于DEBUG的会写入文件 myapp.log 中
logging.debug('debug message')
logging.info('info message')
logging.warning('warning message')
ConfigParser [配置解析]
写入配置文件
import ConfigParser
config = ConfigParser.RawConfigParser()
config.add_section('Section1') # 添加配置文件的块 [name]
config.set('Section1', 'an_int', '15') # 针对块设置配置参数和值
config.set('Section1', 'a_bool', 'true')
config.set('Section1', 'a_float', '3.1415')
config.set('Section1', 'baz', 'fun')
config.set('Section1', 'bar', 'Python')
config.set('Section1', 'foo', '%(bar)s is %(baz)s!')
with open('example.cfg', 'wb') as configfile: # 指定配置文件路径
config.write(configfile) # 写入配置文件
读取配置文件
import ConfigParser
config = ConfigParser.ConfigParser() # 读取带 %(bar)s 插值的配置需用 ConfigParser,RawConfigParser 不做插值
config.read('example.cfg') # 读取配置文件
a_float = config.getfloat('Section1', 'a_float') # 获取配置文件参数对应的浮点值,如参数值类型不对则报ValueError
an_int = config.getint('Section1', 'an_int') # 获取配置文件参数对应的整数值,可直接进行计算
print a_float + an_int
if config.getboolean('Section1', 'a_bool'): # 根据配置文件参数值是否为真
print config.get('Section1', 'foo') # 再获取依赖的配置参数 get获取后值为字符串
print config.get('Section1', 'foo', 0) # 获取配置文件参数的同时加载变量[配置文件中的参数]
print config.get('Section1', 'foo', 1) # 获取配置文件参数 原始值不做任何改动 不使用变量
config.remove_option('Section1', 'bar') # 删除读取配置文件获取bar的值
config.remove_option('Section1', 'baz')
print config.get('Section1', 'foo', 0, {'bar': 'str1', 'baz': 'str1'}) # 读取配置参数的同时设置变量的值
import ConfigParser
import io
sample_config = """
[mysqld]
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
skip-external-locking
old_passwords = 1
skip-bdb
skip-innodb
"""
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(sample_config))
config.get("mysqld", "user")
ftplib [ftp客户端]
from ftplib import FTP
ftp = FTP('ftp.debian.org') # 连接ftp地址 FTP(host,port,timeout)
ftp.login() # 使用默认anonymous登录 login(user,passwd)
ftp.cwd('debian') # 切换到目录debian
ftp.retrlines('LIST') # 打印目录列表
ftp.retrbinary('RETR README', open('README', 'wb').write) # 下载文件写到本地
ftp.delete('filename') # 删除ftp中文件
ftp.mkd('dirname') # 在ftp上创建目录
ftp.size('filename') # 查看文件大小
ftp.quit()
difflib [对象比较]
import sys, difflib
s1 = ['bacon\n', 'eggs\n', 'ham\n', 'guido\n']
s2 = ['python\n', 'eggy\n', 'hamster\n', 'guido\n']
for line in difflib.context_diff(s1, s2, fromfile='txt-s1', tofile='txt-s2'): # 两字列表比较差异
sys.stdout.write(line)
difflib.get_close_matches('appel', ['ape', 'apple', 'peach', 'puppy']) # 模糊匹配 匹配列表与字符串相似的值,越相似越靠前
heapq [优先队列算法]
from heapq import *
h = []
heappush(h, (5, 'write code')) # 放入队列
heappush(h, (7, 'release product'))
heappush(h, (1, 'write spec'))
heappush(h, (3, 'create tests'))
heappop(h) # 从队列取出 第一次是1
from heapq import *
def heapsort(iterable):
h = []
for value in iterable:
heappush(h, value)
return [heappop(h) for i in range(len(h))]
heapsort([1, 3, 5, 7, 9, 2, 4, 6, 8, 0])
linecache [随机读取指定行]
import linecache
linecache.getline('/etc/passwd', 4)
json [数据交换格式]
#!/usr/bin/python
import json
#json file temp.json
#{ "name":"00_sample_case1", "description":"an example."}
f = file("temp.json");
s = json.load(f) # 直接读取json文件
print s
f.close()
d = {"a":1}
j=json.dumps(d) # 字典转json
json.loads(j) # json转字典
s = json.loads('{"name":"test", "type":{"name":"seq", "parameter":["1", "2"]}}')
print type(s) # dic
print s
print s.keys()
print s["type"]["parameter"][1]
json.dumps({'ret':'cmd_ret0', 'out':'cmd_ret1'}, separators=(',', ':')) # 紧凑的json格式,去掉空格
filecmp [文件目录比较]
filecmp.cmp('/etc/passwd', '/etc/passwd') # 比较两文件是否一致
# 比较两目录下文件是否一致
from filecmp import dircmp
def print_diff_files(dcmp):
for name in dcmp.diff_files:
print "diff_file %s found in %s and %s" % (name, dcmp.left, dcmp.right)
for sub_dcmp in dcmp.subdirs.values():
print_diff_files(sub_dcmp)
dcmp = dircmp('dir1', 'dir2')
print_diff_files(dcmp)
errno [符号错误码]
https://docs.python.org/2/library/errno.html#module-errno
import errno
try:
fp = open("no.such.file")
except IOError, (error, message):
if error == errno.ENOENT:
print "no such file"
elif error == errno.EPERM:
print "permission denied"
else:
print message
Exceptions [标准异常类]
# 详见官网 不需要导入
https://docs.python.org/2/library/exceptions.html#module-exceptions
ctypes [调用C的动态库]
提供和C语言兼容的数据类型,也可调用C的动态库
http://blog.csdn.net/linda1000/article/details/12623527
http://www.cnblogs.com/wuchang/archive/2010/04/04/1704456.html
http://www.ibm.com/developerworks/cn/linux/l-cn-pythonandc/
daemon [守护进程]
daemon.py
# 创建守护进程的模块
#!/usr/bin/env python
import sys, os, time, atexit
from signal import SIGTERM
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile='nbMon.pid', stdin='/dev/null', stdout='nbMon.log', stderr='nbMon.log'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
#os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exists. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
run_daemon.py
# 启动脚本,倒入需要后台启动的模块,继承Daemon类,覆盖run函数
# 启动方式 python run_daemon.py start
#!/usr/bin/env python
import Queue
import threading
import sys, time
import urllib2
import json
import framework
from moniItems import mon
from daemon import Daemon
class MyDaemon(Daemon):
def run(self):
print 'start'
framework.startTh()
print 'stop2'
if __name__ == "__main__":
daemon = MyDaemon()
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
psutil [获取系统信息]
pip install psutil # 安装
import psutil
dir(psutil)
psutil.boot_time() # 开机时间
psutil.virtual_memory() # 内存详细信息
psutil.virtual_memory().total # 内存总大小
psutil.disk_partitions() # 获取磁盘信息
psutil.disk_io_counters() # 磁盘IO信息
psutil.net_io_counters() # 获取网络IO信息
psutil.pids() # 返回所有进程PID
psutil.Process(PID) # 获取进程信息
psutil.Process(PID).name() # 指定进程的进程名
psutil.Process(PID).exe() # 进程的路径
psutil.Process(PID).cwd() # 进程工作路径
psutil.Process(PID).status() # 进程状态
psutil.Process(PID).create_time() # 进程创建时间
psutil.Process(PID).memory_percent() # 进程内存使用率
psutil.Process(PID).io_counters() # 进程IO信息
psutil.Process(PID).num_threads() # 进程线程数
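A minimal sketch tying the calls above together (assumes psutil is installed; uses the current process as an example):
import os
import psutil

print psutil.virtual_memory().total / 1024 / 1024, 'MB'   # total memory in MB
p = psutil.Process(os.getpid())                           # our own PID as an example
print p.name(), p.status(), p.num_threads()               # process name, status, thread count
print round(p.memory_percent(), 2)                        # memory usage percentage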
ldap [统一认证]
yum install openldap openldap-clients openldap-devel openssl-devel setuptools==30.1.0
sudo pip uninstall ldap ldap3
pip install python-ldap
import ldap
con = ldap.initialize("ldap://10.10.10.156:389")
con.simple_bind_s("cn=admin,ou=People,dc=gt,dc=com", "pwd")
res = con.search_s("dc=gt,dc=com", ldap.SCOPE_SUBTREE, '(uid=*)', ['*', '+'], 0)
watchdog [监视文件实时写入]
https://pypi.python.org/pypi/watchdog
pip install watchdog
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = sys.argv[1] if len(sys.argv) > 1 else '.'
event_handler = LoggingEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
yaml [标记语言]
pip install pyyaml
import yaml
a = yaml.load("""
name: Vorlin Laruknuzum
sex: Male
class: Priest
title: Acolyte
hp: [32, 71]
sp: [1, 13]
gold: 423
inventory:
- a Holy Book of Prayers (Words of Wisdom)
- an Azure Potion of Cure Light Wounds
- a Silver Wand of Wonder
""")
print a['inventory'][1] # 字典
print yaml.dump(a) # 把字典生成yaml文件
yaml.load_all # 生成迭代器
print yaml.dump({'name': "The Cloak 'Colluin'", 'depth': 5, 'rarity': 45,
'weight': 10, 'cost': 50000, 'flags': ['INT', 'WIS', 'SPEED', 'STEALTH']})
itertools [迭代功能函数]
import itertools
# 全排序
print list(itertools.permutations(['a', 'b', 'c', 'd'],4))
# 无限迭代
ns = itertools.count(1)
for n in ns:
print n
# 指定次数循环
ns = itertools.repeat('A', 10)
for n in ns:
print n
3 socket
socket.gethostname() # 获取主机名
from socket import * # 避免 socket.socket()
s=socket()
s.bind() # 绑定地址到套接字
s.listen() # 开始TCP监听
s.accept() # 被动接受TCP客户端连接,等待连接的到来
s.connect() # 主动初始化TCP服务器连接
s.connect_ex() # connect()函数的扩展版本,出错时返回出错码,而不是跑出异常
s.recv() # 接收TCP数据
s.send() # 发送TCP数据
s.sendall() # 完整发送TCP数据
s.recvfrom() # 接收UDP数据
s.sendto() # 发送UDP数据
s.getpeername() # 连接到当前套接字的远端的地址(TCP连接)
s.getsockname() # 当前套接字的地址
s.getsockopt() # 返回指定套接字的参数
s.setsockopt() # 设置指定套接字的参数
s.close() # 关闭套接字
s.setblocking() # 设置套接字的阻塞与非阻塞模式
s.settimeout() # 设置阻塞套接字操作的超时时间
s.gettimeout() # 得到阻塞套接字操作的超时时间
s.makefile() # 创建一个与该套接字关联的文件对象
s.fileno() # 套接字获取对应的文件描述符fd
socket.AF_UNIX # 只能够用于单一的Unix系统进程间通信
socket.AF_INET # 服务器之间网络通信
socket.AF_INET6 # IPv6
socket.SOCK_STREAM # 流式socket , for TCP
socket.SOCK_DGRAM # 数据报式socket , for UDP
socket.SOCK_RAW # 原始套接字,普通的套接字无法处理ICMP、IGMP等网络报文,而SOCK_RAW可以;其次,SOCK_RAW也可以处理特殊的IPv4报文;此外,利用原始套接字,可以通过IP_HDRINCL套接字选项由用户构造IP头。
socket.SOCK_RDM # 是一种可靠的UDP形式,即保证交付数据报但不保证顺序。SOCK_RAW用来提供对原始协议的低级访问,在需要执行某些特殊操作时使用,如发送ICMP报文。SOCK_RAW通常仅限于高级用户或管理员运行的程序使用。
socket.SOCK_SEQPACKET # 可靠的连续数据包服务
socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # 关闭server后马上释放端口,避免被TIME_WAIT占用
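A minimal sketch of the calls listed above: a one-shot TCP echo server (port 9000 is an arbitrary choice; the SocketServer example below is the more complete version):
import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)   # release the port quickly on restart
s.bind(('', 9000))            # bind to all interfaces
s.listen(1)                   # start TCP listening, backlog of 1
conn, addr = s.accept()       # block until one client connects
conn.sendall(conn.recv(1024)) # echo back whatever was received
conn.close()
s.close()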
select [IO多路复用的机制]
# select每次遍历都需要把fd集合从用户态拷贝到内核态,开销较大,受系统限制最大1024
select.select(rlist, wlist, xlist[, timeout])
# poll和select很像 通过一个pollfd数组向内核传递需要关注的事件,没有描述符1024限制
select.poll()
# 创建epoll句柄,注册监听事件,通过回调函数等待事件产生,不做主动扫描,整个过程对fd只做一次拷贝.打开最大文件数后,不受限制,1GB内存大约是10万链接
select.epoll([sizehint=-1])
select.epoll
EPOLLIN # 监听可读事件
EPOLLET # 高速边缘触发模式,即触发后不会再次触发直到新接收数据
EPOLLOUT # 监听写事件
epoll.poll([timeout=-1[, maxevents=-1]]) # 等待事件,未指定超时时间[毫秒]则为一直阻塞等待
epoll.register(fd,EPOLLIN) # 向epoll句柄中注册,新来socket链接,监听可读事件
epoll.modify(fd, EPOLLET | EPOLLOUT) # 改变监听事件为边缘触发,监听写事件
epoll.fileno() # 通过链接对象得到fd
epoll.unregister(fd) # 取消fd监听事件
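A minimal sketch of the epoll calls above: register a non-blocking listening socket and wait for events (Linux only; a full echo server built this way appears in the epoll section further down):
import socket
import select

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(('', 8000))
srv.listen(5)
srv.setblocking(0)

ep = select.epoll()
ep.register(srv.fileno(), select.EPOLLIN)    # watch the listen fd for readability
while True:
    for fd, events in ep.poll():             # block until at least one event fires
        if fd == srv.fileno():
            conn, addr = srv.accept()
            print 'new connection from', addr
            conn.close()                     # a real server would register conn instead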
SocketServer
#!/usr/bin/python
#server.py
import SocketServer
import os
class MyTCP(SocketServer.BaseRequestHandler):
def handle(self):
# 应该已经封装好了 不需要这层while了 可能会引起大量 close_wait
while True:
self.data=self.request.recv(1024).strip()
if self.data == 'quit' or not self.data:break
cmd=os.popen(self.data).read()
if cmd == '':cmd= self.data + ': Command not found'
self.request.sendall(cmd)
if __name__ == '__main__':
HOST,PORT = '10.0.0.119',50007
server = SocketServer.ThreadingTCPServer((HOST,PORT),MyTCP)
server.serve_forever()
SocketClient
#!/usr/bin/python
#client.py
import socket
HOST='10.0.0.119'
PORT=50007
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((HOST,PORT))
while True:
while True:
cmd=raw_input('CMD:').strip()
if cmd != '':break
s.sendall(cmd)
data=s.recv(1024).split('\n')
print 'cmd:'
for line in data:print line
s.close()
ftp
ftpserver
#!/usr/bin/python
#ftpserver.py
import SocketServer
import os
import cPickle
import md5
from time import sleep
def filer(file1):
try:
f = file(file1,'rb')
return cPickle.load(f)
except IOError:
return {}
except EOFError:
return {}
f.close()
def filew(file1,content):
f = file(file1,'wb')
cPickle.dump(content,f)
f.close()
class MyTCP(SocketServer.BaseRequestHandler):
def handle(self):
i = 0
while i<3:
user=self.request.recv(1024).strip()
userinfo=filer('user.pkl')
if userinfo.has_key(user.split()[0]):
if md5.new(user.split()[1]).hexdigest() == userinfo[user.split()[0]]:
results='login successful'
self.request.sendall(results)
login='successful'
break
else:
i = i + 1
results='Error:password not correct'
self.request.sendall(results)
continue
else:
i = i + 1
results='Error:password not correct'
self.request.sendall(results)
continue
break
else:
results = 'Error:Wrong password too many times'
self.request.sendall(results)
login='failure'
home_path = os.popen('pwd').read().strip() + '/' + user.split()[0]
current_path = '/'
print home_path
while True:
if login == 'failure':
break
print 'home_path:%s=current_path:%s' %(home_path,current_path)
cmd=self.request.recv(1024).strip()
print cmd
if cmd == 'quit':
break
elif cmd == 'dir':
list=os.listdir('%s%s' %(home_path,current_path))
if list:
dirlist,filelist = '',''
for i in list:
if os.path.isdir('%s%s%s' %(home_path,current_path,i)):
dirlist = dirlist + '\033[32m' + i + '\033[m\t'
else:
filelist = filelist + i + '\t'
results = dirlist + filelist
else:
results = '\033[31mnot find\033[m'
self.request.sendall(results)
elif cmd == 'pdir':
self.request.sendall(current_path)
elif cmd.split()[0] == 'mdir':
if cmd.split()[1].isalnum():
tmppath='%s%s%s' %(home_path,current_path,cmd.split()[1])
os.makedirs(tmppath)
self.request.sendall('\033[32mcreating successful\033[m')
else:
self.request.sendall('\033[31mcreate failure\033[m')
elif cmd.split()[0] == 'cdir':
if cmd.split()[1] == '/':
tmppath='%s%s' %(home_path,cmd.split()[1])
if os.path.isdir(tmppath):
current_path = cmd.split()[1]
self.request.sendall(current_path)
else:
self.request.sendall('\033[31mnot_directory\033[m')
elif cmd.split()[1].startswith('/'):
tmppath='%s%s' %(home_path,cmd.split()[1])
if os.path.isdir(tmppath):
current_path = cmd.split()[1] + '/'
self.request.sendall(current_path)
else:
self.request.sendall('\033[31mnot_directory\033[m')
else:
tmppath='%s%s%s' %(home_path,current_path,cmd.split()[1])
if os.path.isdir(tmppath):
current_path = current_path + cmd.split()[1] + '/'
self.request.sendall(current_path)
else:
self.request.sendall('\033[31mnot_directory\033[m')
elif cmd.split()[0] == 'get':
if os.path.isfile('%s%s%s' %(home_path,current_path,cmd.split()[1])):
f = file('%s%s%s' %(home_path,current_path,cmd.split()[1]),'rb')
self.request.sendall('ready_file')
sleep(0.5)
self.request.send(f.read())
f.close()
sleep(0.5)
elif os.path.isdir('%s%s%s' %(home_path,current_path,cmd.split()[1])):
self.request.sendall('ready_dir')
sleep(0.5)
for dirpath in os.walk('%s%s%s' %(home_path,current_path,cmd.split()[1])):
dir=dirpath[0].replace('%s%s' %(home_path,current_path),'',1)
self.request.sendall(dir)
sleep(0.5)
for filename in dirpath[2]:
self.request.sendall(filename)
sleep(0.5)
f = file('%s/%s' %(dirpath[0],filename),'rb')
self.request.send(f.read())
f.close()
sleep(0.5)
self.request.sendall('file_get_done')
sleep(0.5)
else:
self.request.sendall('dir_get_done')
sleep(0.5)
else:
self.request.sendall('get_failure')
continue
self.request.sendall('get_done')
elif cmd.split()[0] == 'send':
if os.path.exists('%s%s%s' %(home_path,current_path,cmd.split()[1])):
self.request.sendall('existing')
action=self.request.recv(1024)
if action == 'cancel':
continue
self.request.sendall('ready')
msg=self.request.recv(1024)
if msg == 'ready_file':
f = file('%s%s%s' %(home_path,current_path,cmd.split()[1]),'wb')
while True:
data=self.request.recv(1024)
if data == 'file_send_done':break
f.write(data)
f.close()
elif msg == 'ready_dir':
os.system('mkdir -p %s%s%s' %(home_path,current_path,cmd.split()[1]))
while True:
dir=self.request.recv(1024)
if dir == 'get_done':break
os.system('mkdir -p %s%s%s' %(home_path,current_path,dir))
while True:
filename=self.request.recv(1024)
if filename == 'dir_send_done':break
f = file('%s%s%s/%s' %(home_path,current_path,dir,filename),'wb')
while True:
data=self.request.recv(1024)
if data == 'file_send_done':break
f.write(data)
f.close()
self.request.sendall('%s/%s\t\033[32mfile_done\033[m' %(dir,filename))
self.request.sendall('%s\t\033[32mdir_done\033[m' %(dir))
elif msg == 'unknown_file':
continue
else:
results = cmd.split()[0] + ': Command not found'
self.request.sendall(results)
if __name__ == '__main__':
HOST,PORT = '10.152.14.85',50007
server = SocketServer.ThreadingTCPServer((HOST,PORT),MyTCP)
server.serve_forever()
ftpmanage
#!/usr/bin/python
#manage_ftp.py
import cPickle
import sys
import md5
import os
import getpass
def filer(file1):
try:
f = file(file1,'rb')
return cPickle.load(f)
except IOError:
return {}
except EOFError:
return {}
f.close()
def filew(file1,content):
f = file(file1,'wb')
cPickle.dump(content,f)
f.close()
while True:
print '''
1.add user
2.del user
3.change password
4.query user
0.exit
'''
i = raw_input(':').strip()
userinfo=filer('user.pkl')
if i == '':
continue
elif i == '1':
while True:
user=raw_input('user name:').strip()
if user.isalnum():
i = 0
while i<3:
passwd=getpass.getpass('passwd:').strip()
if passwd == '':
continue
else:
passwd1=getpass.getpass('Confirm password:').strip()
if passwd == passwd1:
mpasswd = md5.new(passwd).hexdigest()
userinfo[user] = mpasswd
os.system('mkdir -p %s' %user)
print '%s creating successful ' %user
break
else:
print "Passwords don't match "
i = i + 1
continue
else:
print 'Too many wrong'
continue
break
else:
print 'user not legal'
continue
elif i == '2':
user=raw_input('user name:').strip()
if userinfo.has_key(user):
del userinfo[user]
print 'Delete users successfully'
else:
print 'user not exist'
continue
elif i == '3':
user=raw_input('user name:').strip()
if userinfo.has_key(user):
i = 0
while i<3:
passwd=getpass.getpass('passwd:').strip()
if passwd == '':
continue
else:
passwd1=getpass.getpass('Confirm password:').strip()
if passwd == passwd1:
mpasswd = md5.new(passwd).hexdigest()
userinfo[user] = mpasswd
print '%s password is changed' %user
break
else:
print "Passwords don't match "
i = i + 1
continue
else:
print 'Too many wrong'
continue
else:
print 'user not exist'
continue
elif i == '4':
print userinfo.keys()
elif i == '0':
sys.exit()
else:
print 'select error'
continue
filew('user.pkl',content=userinfo)
ftpclient
#!/usr/bin/python
#ftpclient.py
import socket
import os
import getpass
from time import sleep
HOST='10.152.14.85'
PORT=50007
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((HOST,PORT))
while True:
user = raw_input('user:').strip()
if user.isalnum():
while True:
passwd = getpass.getpass('passwd:').strip()
s.sendall(user + ' ' + passwd)
servercmd=s.recv(1024)
if servercmd == 'login successful':
print '\033[32m%s\033[m' %servercmd
break
else:
print servercmd
while True:
cmd=raw_input('FTP>').strip()
if cmd == '':
continue
if cmd.split()[0] == 'get':
if cmd == 'get':continue
for i in cmd.split()[1:]:
if os.path.exists(i):
confirm = raw_input("\033[31mPlease confirm whether the cover %s(Y/N):\033[m" %(i)).upper().startswith('Y')
if not confirm:
print '%s cancel' %i
continue
s.sendall('get ' + i)
servercmd=s.recv(1024)
if servercmd == 'inexistence':
print '%s \t\033[32minexistence\033[m' %i
continue
elif servercmd == 'ready_file':
f = file(i,'wb')
while True:
data=s.recv(1024)
if data == 'get_done':break
f.write(data)
f.close()
print '%s \t\033[32mfile_done\033[m' %(i)
elif servercmd == 'ready_dir':
try:
os.makedirs(i)
except:
pass
while True:
serverdir=s.recv(1024)
if serverdir == 'get_done':break
os.system('mkdir -p %s' %serverdir)
print '%s \t\033[32mdir_done\033[m' %(serverdir)
while True:
serverfile=s.recv(1024)
if serverfile == 'dir_get_done':break
f = file('%s/%s' %(serverdir,serverfile),'wb')
while True:
data=s.recv(1024)
if data == 'file_get_done':break
f.write(data)
f.close()
print '%s/%s \t\033[32mfile_done\033[m' %(serverdir,serverfile)
elif cmd.split()[0] == 'send':
if cmd == 'send':continue
for i in cmd.split()[1:]:
if not os.path.exists(i):
print '%s\t\033[31minexistence\033[m' %i
continue
s.sendall('send ' + i)
servercmd=s.recv(1024)
if servercmd == 'existing':
confirm = raw_input("\033[31mPlease confirm whether the cover %s(Y/N):\033[m" %(i)).upper().startswith('Y')
if confirm:
s.sendall('cover')
servercmd=s.recv(1024)
else:
s.sendall('cancel')
print '%s\tcancel' %i
continue
if os.path.isfile(i):
s.sendall('ready_file')
sleep(0.5)
f = file(i,'rb')
s.send(f.read())
sleep(0.5)
s.sendall('file_send_done')
print '%s\t\033[32mfile done\033[m' %(cmd.split()[1])
f.close()
elif os.path.isdir(i):
s.sendall('ready_dir')
sleep(0.5)
for dirpath in os.walk(i):
dir=dirpath[0].replace('%s/' %os.popen('pwd').read().strip(),'',1)
s.sendall(dir)
sleep(0.5)
for filename in dirpath[2]:
s.sendall(filename)
sleep(0.5)
f = file('%s/%s' %(dirpath[0],filename),'rb')
s.send(f.read())
f.close()
sleep(0.5)
s.sendall('file_send_done')
msg=s.recv(1024)
print msg
else:
s.sendall('dir_send_done')
msg=s.recv(1024)
print msg
else:
s.sendall('unknown_file')
print '%s\t\033[31munknown type\033[m' %i
continue
sleep(0.5)
s.sendall('get_done')
elif cmd.split()[0] == 'cdir':
if cmd == 'cdir':continue
s.sendall(cmd)
data=s.recv(1024)
print data
continue
elif cmd == 'ls':
list=os.popen(cmd).read().strip().split('\n')
if list:
dirlist,filelist = '',''
for i in list:
if os.path.isdir(i):
dirlist = dirlist + '\033[32m' + i + '\033[m\t'
else:
filelist = filelist + i + '\t'
results = dirlist + filelist
else:
results = '\033[31mnot find\033[m'
print results
continue
elif cmd == 'pwd':
os.system(cmd)
elif cmd.split()[0] == 'cd':
try:
os.chdir(cmd.split()[1])
except:
print '\033[31mcd failure\033[m'
elif cmd == 'dir':
s.sendall(cmd)
data=s.recv(1024)
print data
continue
elif cmd == 'pdir':
s.sendall(cmd)
data=s.recv(1024)
print data
continue
elif cmd.split()[0] == 'mdir':
if cmd == 'mdir':continue
s.sendall(cmd)
data=s.recv(1024)
print data
continue
elif cmd.split()[0] == 'help':
print '''
get [file] [dir]
send [file] [dir]
dir
mdir
cdir
pdir
pwd
md
cd
ls
help
quit
'''
continue
elif cmd == 'quit':
break
else:
print '\033[31m%s: Command not found,Please see the "help"\033[m' %cmd
else:
continue
break
s.close()
扫描主机开放端口
#!/usr/bin/env python
import socket
def check_server(address,port):
s=socket.socket()
try:
s.connect((address,port))
return True
except socket.error,e:
return False
if __name__=='__main__':
from optparse import OptionParser
parser=OptionParser()
parser.add_option("-a","--address",dest="address",default='localhost',help="Address for server",metavar="ADDRESS")
parser.add_option("-s","--start",dest="start_port",type="int",default=1,help="start port",metavar="SPORT")
parser.add_option("-e","--end",dest="end_port",type="int",default=1,help="end port",metavar="EPORT")
(options,args)=parser.parse_args()
print 'options: %s, args: %s' % (options, args)
port=options.start_port
while(port<=options.end_port):
check = check_server(options.address, port)
if (check):
print 'Port %s is on' % port
port=port+1
zmq [网络通讯库]
# https://github.com/zeromq/pyzmq
# pip install pyzmq
# ZMQ是一个开源的、跨语言的、非常简洁的、非常高性能、非常灵活的网络通讯库
服务端程序
import zmq
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://127.0.0.1:1234") # 提供传输协议 INPROC IPC MULTICAST TCP
while True :
msg = socket.recv()
socket.send(msg)
客户端端程序
import zmq
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://127.0.0.1:1234")
# socket.connect("tcp://127.0.0.1:6000") # 设置2个可以均衡负载请求到2个监听的server
msg_send = "xxx"
socket.send(msg_send)
print "Send:", msg_send
msg_recv = socket.recv()
print "Receive:", msg_recv
epoll
https://docs.python.org/2/library/select.html # python官网
epoll短链接server
# 原文 http://my.oschina.net/moooofly/blog/147297
# 此代码还有改进地方,在接收数据和发送数据都是阻塞死循环处理,必须等待全部接收完毕才会继续操作
server端代码:
#!/usr/bin/python
#-*- coding:utf-8 -*-
import socket, logging
import select, errno
logger = logging.getLogger("network-server")
def InitLog():
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("network-server.log")
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
if __name__ == "__main__":
InitLog()
try:
# 创建 TCP socket 作为监听 socket
listen_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
except socket.error, msg:
logger.error("create socket failed")
try:
# 设置 SO_REUSEADDR 选项
listen_fd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error, msg:
logger.error("setsocketopt SO_REUSEADDR failed")
try:
# 进行 bind -- 此处未指定 ip 地址,即 bind 了全部网卡 ip 上
listen_fd.bind(('', 2003))
except socket.error, msg:
logger.error("bind failed")
try:
# 设置 listen 的 backlog 数
listen_fd.listen(10)
except socket.error, msg:
logger.error(msg)
try:
# 创建 epoll 句柄
epoll_fd = select.epoll()
# 向 epoll 句柄中注册 监听 socket 的 可读 事件
epoll_fd.register(listen_fd.fileno(), select.EPOLLIN)
except select.error, msg:
logger.error(msg)
connections = {}
addresses = {}
datalist = {}
while True:
# epoll 进行 fd 扫描的地方 -- 未指定超时时间则为阻塞等待
epoll_list = epoll_fd.poll()
for fd, events in epoll_list:
# 若为监听 fd 被激活
if fd == listen_fd.fileno():
# 进行 accept -- 获得连接上来 client 的 ip 和 port,以及 socket 句柄
conn, addr = listen_fd.accept()
logger.debug("accept connection from %s, %d, fd = %d" % (addr[0], addr[1], conn.fileno()))
# 将连接 socket 设置为 非阻塞
conn.setblocking(0)
# 向 epoll 句柄中注册 连接 socket 的 可读 事件
epoll_fd.register(conn.fileno(), select.EPOLLIN | select.EPOLLET)
# 将 conn 和 addr 信息分别保存起来
connections[conn.fileno()] = conn
addresses[conn.fileno()] = addr
elif select.EPOLLIN & events:
# 有 可读 事件激活
datas = ''
while True:
try:
# 从激活 fd 上 recv 10 字节数据
data = connections[fd].recv(10)
# 若当前没有接收到数据,并且之前的累计数据也没有
if not data and not datas:
# 从 epoll 句柄中移除该 连接 fd
epoll_fd.unregister(fd)
# server 侧主动关闭该 连接 fd
connections[fd].close()
logger.debug("%s, %d closed" % (addresses[fd][0], addresses[fd][1]))
break
else:
# 将接收到的数据拼接保存在 datas 中
datas += data
except socket.error, msg:
# 在 非阻塞 socket 上进行 recv 需要处理 读穿 的情况
# 这里实际上是利用 读穿 出 异常 的方式跳到这里进行后续处理
if msg.errno == errno.EAGAIN:
logger.debug("%s receive %s" % (fd, datas))
# 将已接收数据保存起来
datalist[fd] = datas
# 更新 epoll 句柄中连接d 注册事件为 可写
epoll_fd.modify(fd, select.EPOLLET | select.EPOLLOUT)
break
else:
# 出错处理
epoll_fd.unregister(fd)
connections[fd].close()
logger.error(msg)
break
elif select.EPOLLHUP & events:
# 有 HUP 事件激活
epoll_fd.unregister(fd)
connections[fd].close()
logger.debug("%s, %d closed" % (addresses[fd][0], addresses[fd][1]))
elif select.EPOLLOUT & events:
# 有 可写 事件激活
sendLen = 0
# 通过 while 循环确保将 buf 中的数据全部发送出去
while True:
# 将之前收到的数据发回 client -- 通过 sendLen 来控制发送位置
sendLen += connections[fd].send(datalist[fd][sendLen:])
# 在全部发送完毕后退出 while 循环
if sendLen == len(datalist[fd]):
break
# 更新 epoll 句柄中连接 fd 注册事件为 可读
epoll_fd.modify(fd, select.EPOLLIN | select.EPOLLET)
else:
# 其他 epoll 事件不进行处理
continue
client 端代码
import socket
import time
import logging
logger = logging.getLogger("network-client")
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("network-client.log")
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
if __name__ == "__main__":
try:
connFd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
except socket.error, msg:
logger.error(msg)
try:
connFd.connect(("127.0.0.1", 2003))
logger.debug("connect to network server success")
except socket.error,msg:
logger.error(msg)
for i in range(1, 11):
data = "The Number is %d" % i
if connFd.send(data) != len(data):
logger.error("send data to network server failed")
break
readData = connFd.recv(1024)
print readData
time.sleep(1)
connFd.close()
4 mysql
# yum install mysql-devel python-tools gcc openssl-devel
pip install MySQL-python
# yum install python-MySQLdb MySQL-python
help(MySQLdb.connections.Connection) # 查看链接参数
conn=MySQLdb.connect(host='localhost',user='root',passwd='123456',db='fortress',port=3306) # 定义连接
#conn=MySQLdb.connect(unix_socket='/var/run/mysqld/mysqld.sock',user='root',passwd='123456') # 使用socket文件链接
conn.autocommit(True) # 自动提交
cur=conn.cursor() # 定义游标
conn.select_db('fortress') # 选择数据库
sqlcmd = 'insert into user(name,age) value(%s,%s)' # 定义sql命令
cur.executemany(sqlcmd,[('aa',1),('bb',2),('cc',3)]) # 插入多条值
cur.execute('delete from user where id=20') # 删除一条记录
cur.execute("update user set name='a' where id=20") # 更细数据
sqlresult = cur.fetchall() # 接收全部返回结果
conn.commit() # 提交
cur.close() # 关闭游标
conn.close() # 关闭连接
import MySQLdb
def mydb(dbcmdlist):
try:
conn=MySQLdb.connect(host='localhost',user='root',passwd='123456',db='fortress',port=3306)
conn.autocommit(True)
cur=conn.cursor()
cur.execute('create database if not exists fortress;') # 创建数据库
conn.select_db('fortress') # 选择数据库
cur.execute('drop table if exists log;') # 删除表
cur.execute('CREATE TABLE log ( id BIGINT(20) NOT NULL AUTO_INCREMENT, loginuser VARCHAR(50) DEFAULT NULL, remoteip VARCHAR(50) DEFAULT NULL, PRIMARY KEY (id) );') # 创建表
result=[]
for dbcmd in dbcmdlist:
cur.execute(dbcmd) # 执行sql
sqlresult = cur.fetchall() # 接收全部返回结果
result.append(sqlresult)
conn.commit() # 提交
cur.close()
conn.close()
return result
except MySQLdb.Error,e:
print 'mysql error msg: ',e
sqlcmd=[]
sqlcmd.append("insert into log (loginuser,remoteip)values('%s','%s');" %(loginuser,remoteip))
mydb(sqlcmd)
sqlcmd=[]
sqlcmd.append("select * from log;")
result = mydb(sqlcmd)
for i in result[0]:
print i
mysql链接失败重试
import MySQLdb as mysql
import time
class my():
def executeSQL(self, sql="select * from `serverinfo` limit 1;"):
while True:
try:
self.conn.ping()
break
except Exception,e:
print('warning: mysql test ping fail')
print(str(e))
try:
self.conn = mysql.connect(user="opsdeploy", passwd="123456", host='172.222.50.50', port=3306, db="ops_deploy", connect_timeout=10, compress=True, charset="utf8")
self.cursor = self.conn.cursor()
break
except Exception,e:
print("mysql reconnect fail ...")
print(str(e))
time.sleep(2)
try:
self.cursor.execute(sql)
self.conn.commit()
print self.cursor.fetchall()
except Exception,e:
print(str(e))
m=my()
m.executeSQL()
5 处理信号
信号的概念
信号(signal): 进程之间通讯的方式,是一种软件中断。一个进程一旦接收到信号就会打断原来的程序执行流程来处理信号。
发送信号一般有两种原因:
1(被动式) 内核检测到一个系统事件.例如子进程退出会像父进程发送SIGCHLD信号.键盘按下control+c会发送SIGINT信号
2(主动式) 通过系统调用kill来向指定进程发送信号
操作系统规定了进程收到信号以后的默认行为,可以通过绑定信号处理函数来修改进程收到信号以后的行为,有两个信号是不可更改的 SIGSTOP 和 SIGKILL
如果一个进程收到一个SIGUSR1信号,然后执行信号绑定函数,第二个SIGUSR1信号又来了,第一个信号没有被处理完毕的话,第二个信号就会丢弃。
进程结束信号 SIGTERM 和 SIGKILL 的区别: SIGTERM 比较友好,进程能捕捉这个信号,根据您的需要来关闭程序。在关闭程序之前,您可以结束打开的记录文件和完成正在做的任务。在某些情况下,假如进程正在进行作业而且不能中断,那么进程可以忽略这个SIGTERM信号。
常见信号
kill -l # 查看linux提供的信号
SIGHUP 1 A # 终端挂起或者控制进程终止
SIGINT 2 A # 键盘终端进程(如control+c)
SIGQUIT 3 C # 键盘的退出键被按下
SIGILL 4 C # 非法指令
SIGABRT 6 C # 由abort(3)发出的退出指令
SIGFPE 8 C # 浮点异常
SIGKILL 9 AEF # Kill信号 立刻停止
SIGSEGV 11 C # 无效的内存引用
SIGPIPE 13 A # 管道破裂: 写一个没有读端口的管道
SIGALRM 14 A # 闹钟信号 由alarm(2)发出的信号
SIGTERM 15 A # 终止信号,可让程序安全退出 kill -15
SIGUSR1 30,10,16 A # 用户自定义信号1
SIGUSR2 31,12,17 A # 用户自定义信号2
SIGCHLD 20,17,18 B # 子进程结束自动向父进程发送SIGCHLD信号
SIGCONT 19,18,25 # 进程继续(曾被停止的进程)
SIGSTOP 17,19,23 DEF # 暂停进程(不可被捕获或忽略)
SIGTSTP 18,20,24 D # 控制终端(tty)上按下停止键
SIGTTIN 21,21,26 D # 后台进程企图从控制终端读
SIGTTOU 22,22,27 D # 后台进程企图从控制终端写
缺省处理动作一项中的字母含义如下:
A 缺省的动作是终止进程
B 缺省的动作是忽略此信号,将该信号丢弃,不做处理
C 缺省的动作是终止进程并进行内核映像转储(dump core),内核映像转储是指将进程数据在内存的映像和进程在内核结构中的部分内容以一定格式转储到文件系统,并且进程退出执行,这样做的好处是为程序员提供了方便,使得他们可以得到进程当时执行时的数据值,允许他们确定转储的原因,并且可以调试他们的程序。
D 缺省的动作是停止进程,进入停止状况以后还能重新进行下去,一般是在调试的过程中(例如ptrace系统调用)
E 信号不能被捕获
F 信号不能被忽略
Python提供的信号
import signal
dir(signal)
['NSIG', 'SIGABRT', 'SIGALRM', 'SIGBUS', 'SIGCHLD', 'SIGCLD', 'SIGCONT', 'SIGFPE', 'SIGHUP', 'SIGILL', 'SIGINT', 'SIGIO', 'SIGIOT', 'SIGKILL', 'SIGPIPE', 'SIGPOLL', 'SIGPROF', 'SIGPWR', 'SIGQUIT', 'SIGRTMAX', 'SIGRTMIN', 'SIGSEGV', 'SIGSTOP', 'SIGSYS', 'SIGTERM', 'SIGTRAP', 'SIGTSTP', 'SIGTTIN', 'SIGTTOU', 'SIGURG', 'SIGUSR1', 'SIGUSR2', 'SIGVTALRM', 'SIGWINCH', 'SIGXCPU', 'SIGXFSZ', 'SIG_DFL', 'SIG_IGN', '__doc__', '__name__', 'alarm', 'default_int_handler', 'getsignal', 'pause', 'signal']
绑定信号处理函数
#encoding:utf8
import os,signal
from time import sleep
def onsignal_term(a,b):
print 'SIGTERM' # kill -15
signal.signal(signal.SIGTERM,onsignal_term) # 接收信号,执行相应函数
def onsignal_usr1(a,b):
print 'SIGUSR1' # kill -10
signal.signal(signal.SIGUSR1,onsignal_usr1)
while 1:
print 'ID',os.getpid()
sleep(10)
通过另外一个进程发送信号
import os,signal
os.kill(16175,signal.SIGTERM) # 发送信号,16175是绑定信号处理函数的进程pid,需要自行修改
os.kill(16175,signal.SIGUSR1)
父进程接收子进程结束发送的SIGCHLD信号
#encoding:utf8
import os,signal
from time import sleep
def onsigchld(a,b):
print '收到子进程结束信号'
signal.signal(signal.SIGCHLD,onsigchld)
pid = os.fork() # 创建一个子进程,复制父进程所有资源操作
if pid == 0: # 通过判断子进程os.fork()是否等于0,分别同时执行父进程与子进程操作
print '我是子进程,pid是',os.getpid()
sleep(2)
else:
print '我是父进程,pid是',os.getpid()
os.wait() # 等待子进程结束
接收信号的程序,另外一端使用多线程向这个进程发送信号,会遗漏一些信号
#encoding:utf8
import os
import signal
from time import sleep
import Queue
QCOUNT = Queue.Queue() # 初始化队列
def onsigchld(a,b):
'''收到信号后向队列中插入一个数字1'''
print '收到SIGUSR1信号'
sleep(1)
QCOUNT.put(1) # 向队列中写入
signal.signal(signal.SIGUSR1,onsigchld) # 绑定信号处理函数
while 1:
print '我的pid是',os.getpid()
print '现在队列中元素的个数是',QCOUNT.qsize()
sleep(2)
多线程发信号端的程序
#encoding:utf8
import threading
import os
import signal
def sendusr1():
print '发送信号'
os.kill(17788, signal.SIGUSR1) # 这里的进程id需要写前一个程序实际运行的pid
WORKER = []
for i in range(1, 7): # 开启6个线程
threadinstance = threading.Thread(target = sendusr1)
WORKER.append(threadinstance)
for i in WORKER:
i.start()
for i in WORKER:
i.join()
print '主线程完成'
6 缓存数据库
python使用memcache
easy_install python-memcached # 安装(python2.7+)
import memcache
mc = memcache.Client(['10.152.14.85:12000'],debug=True) # 也可以使用socket直接连接IP端口
mc.set('name','luo',60)
mc.get('name')
mc.delete('name1')
# 豆瓣的python-memcache模块,大于1M自动切割 性能是纯python的3倍+
https://code.google.com/p/python-libmemcached/
保存数据
set(key,value,timeout) # 把key映射到value,timeout指的是什么时候这个映射失效
add(key,value,timeout) # 仅当存储空间中不存在键相同的数据时才保存
replace(key,value,timeout) # 仅当存储空间中存在键相同的数据时才保存
获取数据
get(key) # 返回key所指向的value
get_multi(key1,key2,key3) # 可以非同步地同时取得多个键值, 比循环调用get快数十倍
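A minimal sketch of set/add/replace/get_multi (assumes a memcached instance listening on 127.0.0.1:11211):
import memcache

mc = memcache.Client(['127.0.0.1:11211'])
mc.set('k1', 'v1', 60)              # map k1 -> v1, expire in 60 seconds
mc.add('k1', 'other', 60)           # does nothing: k1 already exists
mc.replace('k1', 'v2', 60)          # succeeds because k1 exists
print mc.get_multi(['k1', 'k2'])    # {'k1': 'v2'} -- missing keys are simply omitted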
python使用mongodb
# 新版本
http://api.mongodb.org/python/2.7.2/tutorial.html
http://api.mongodb.org/python/current/examples/custom_type.html
easy_install pymongo # 安装
import pymongo
cl = pymongo.MongoClient("127.0.0.1", 27017)
db = cl.ops # 选择库
db.name # 查看库名
db.collection_names() # 查看所有文档
db.project # 选择文档
db.project.insert({'name':'live','group':'a'})
db.project.insert({'name':'news','group':'b'})
db.project.find_one({'group':'a'})
for post in db.project.find():
print post['name']
db.project.remove()
# 执行mongo命令
# https://api.mongodb.com/python/current/api/pymongo/database.html
db.command("filemd5", object_id, root=file_root)
db.command("dropUser", "user")
db.command("createUser", "admin", pwd="password", roles=["root"])
for x,y in db.command("currentOp").items():
print x,y
# currentOp 在 pymongo 3.9 起废弃,建议使用 aggregate()
with cl.admin.aggregate([{"$currentOp": {}}]) as cursor:
for operation in cursor:
print(operation)
python使用redis
https://pypi.python.org/pypi/redis # redis的python官网
pip install redis OR easy_install redis # 安装
http://redis.readthedocs.org/en/latest/index.html # redis命令详解
http://redis.readthedocs.org/en/2.4/index.html
import redis
rds = redis.Redis(host=host, port=port, password=passwd, socket_timeout=10,db=0)
rds.info() # redis信息
rds.set(key, value) # 将值value关联到key
rds.get(key) # 取key值
rds.delete(key1,key2) # 删除key
rds.rename(key,new_key2) # 将key改名 存在覆盖
rds.setnx(key,value) # 将值value关联到key,如果key存在不做任何动作
rds.setex(key, value, 10800) # 将值value关联到key,并设置key的过期时间
rds.mset() # 同时设置一个或多个key-value对 如果key存在则覆盖
rds.msetnx() # 同时设置一个或多个key-value对 如果有key存在则失败
rds.mget(key1, key2, key3) # 取多个key值 不存在返回nil
rds.expire(key seconds) # 设置key的过期时间
rds.persist(key) # 移除key的过期时间
rds.ttl(key) # 查看超时时间 -1为不过期
rds.sadd(key,value1) # 将value1加入集合中 集合不重复
rds.smembers(key) # 返回key中所有成员
rds.scard(key) # 集合中元素的数量
rds.srandmember(key) # 对集合随机返回一个元素 而不对集合改动 当key不存在或key是空集时,返回nil
rds.sinter(key1,key2) # 两个集合的交集
rds.sdiff(key1,key2) # 两个集合的差集
rds.sismember(key,value) # 判断value元素是否是集合key的成员 1存在 0不存在
rds.lpush(key,value1) # 将value1加入列表中 从左到右
rds.lpop(key) # 移除并返回列表key的头元素
rds.llen(key) # 返回列表长度
rds.sort(key) # 对列表、集合、有序集合排序[大列表排序非常影响性能,甚至把redis拖死]
rds.append(key,value) # 字符串拼接为新的value
rds.ltrim(key, 0, -10) # 只保留指定区间内的元素,不在区间内的都被删除 0为第一个 -1为最后一个
rds.incr(key , amount=1) # 计数加1 默认1或请先设置key的数值
rds.decr(key) # 计数减1 请先设置key的数值
rds.save() # 保存数据
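A minimal sketch of a few of the calls above (assumes a local redis on 127.0.0.1:6379; the setex argument order follows the redis-py 2.x API used in this section):
import redis

rds = redis.Redis(host='127.0.0.1', port=6379, db=0)
rds.setex('pv:today', 1, 86400)     # key with a one-day expiry
rds.incr('pv:today')                # -> 2
rds.lpush('jobs', 'job1')
print rds.ttl('pv:today')           # remaining seconds
print rds.lpop('jobs')              # 'job1'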
python使用kestrel队列
# pykestrel
import kestrel
q = kestrel.Client(servers=['127.0.0.1:22133'],queue='test_queue')
q.add('some test job')
job = q.get() # 从队列读取工作
job = q.peek() # 读取下一份工作
# 读取一组工作
while True:
job = q.next(timeout=10) # 完成工作并获取下一个工作,如果没有工作,则等待10秒
if job is not None:
try:
# 流程工作
except:
q.abort() # 标记失败工作
q.finish() # 完成最后工作
q.close() # 关闭连接
kestrel状态检查
# kestrel支持memcache协议客户端
#!/usr/local/bin/python
# 10.13.81.125 22133 10000
import memcache
import sys
import traceback
ip="%s:%s" % (sys.argv[1],sys.argv[2])
try:
mc = memcache.Client([ip,])
st=mc.get_stats()
except:
print "kestrel connection exception"
sys.exit(2)
if st:
for s in st[0][1].keys():
if s.startswith('queue_') and s.endswith('_mem_items'):
num = int(st[0][1][s])
if num > int(sys.argv[3]):
print "%s block to %s" %(s[6:-6],num)
sys.exit(2)
print "kestrel ok!"
sys.exit(0)
else:
print "kestrel down"
sys.exit(2)
python使用tarantool
# pip install tarantool-queue
from tarantool_queue import Queue
queue = Queue("localhost", 33013, 0) # 连接读写端口 空间0
tube = queue.tube("name_of_tube") #
tube.put([1, 2, 3])
task = tube.take()
task.data # take task and read data from it
task.ack() # move this task into state DONE
python-etcd
http://python-etcd.readthedocs.io/en/latest/
pip install python-etcd
import etcd
client = etcd.Client(host='etcd-01', port=2379)
client = etcd.Client( (('etcd-01', 2379), ('etcd-02', 2379), ('etcd-03', 2379)) ,allow_reconnect=True) # 集群多IP allow_reconnect 允许重连
# 增加 目录必须存在 # 目录: /v1/xuesong/
client.write('/v1/xuesong/10.10.10.10:8080', 'test')
# 获取指定路径的值
r = client.read('/v1/xuesong/10.10.10.10:8080' , recursive=True, sorted=True)
r.value
# 删除指定路径
client.delete('/v1/xuesong/10.10.10.10:8080')
# with ttl
client.write('/nodes/n2', 2, ttl=4) # sets the ttl to 4 seconds
# create only
client.write('/nodes/n3', 'test', prevExist=False)
# Compare and swap values atomically
client.write('/nodes/n3', 'test2', prevValue='test1') #this fails to write
client.write('/nodes/n3', 'test2', prevIndex=10) #this fails to write
# mkdir
client.write('/nodes/queue', None, dir=True)
# Append a value to a queue dir
client.write('/nodes/queue', 'test', append=True) #will write i.e. /nodes/queue/11
client.write('/nodes/queue', 'test2', append=True) #will write i.e. /nodes/queue/12
client.read('/nodes/n2').value # 获取单个键值
r = client.read('/nodes', recursive=True, sorted=True) # 递归查询目录
for i in r.children:
if not i.dir:
print("%s: %s" % (child.key,child.value))
client.read('/nodes/n2', wait=True) #Waits for a change in value in the key before returning.
client.read('/nodes/n2', wait=True, waitIndex=10)
try:
client.read('/invalid/path')
except etcd.EtcdKeyNotFound:
print "error"
client.delete('/nodes/n1')
client.delete('/nodes', dir=True) #spits an error if dir is not empty
client.delete('/nodes', recursive=True) #this works recursively
client.watch('/nodes/n1', recursive=True,timeout=0) # 递归获取改变值 阻塞直到有改变
# watch只会阻塞监视之后的一次改动,所以必须先递归read下所有路径,然后根据每次的watch进行更改
# 第一次read的时候,需要记录 etcd_index+1作为下一次watch的索引
index = client.read('/nodes/n1', recursive=True).etcd_index
while 1:
# watch后的索引是 modifiedIndex+1传给下一次的watch
index = client.watch('/nodes/n1', recursive=True, timeout=0, index=index+1).modifiedIndex
python操作zookeeper
https://kazoo.readthedocs.io/en/latest/basic_usage.html
pip install kazoo
from kazoo.client import KazooClient
zk = KazooClient(hosts='127.0.0.1:2181', read_only=True)
zk.start()
zk.get_children('/')
zk.stop()
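A slightly fuller kazoo sketch, assuming a reachable ZooKeeper at 127.0.0.1:2181; it creates an ephemeral node, reads it back and registers a children watch (paths and data are made up):
from kazoo.client import KazooClient

zk = KazooClient(hosts='127.0.0.1:2181')             # placeholder address
zk.start()
zk.ensure_path('/demo')                              # create the parent path if it is missing
zk.create('/demo/node1', b'hello', ephemeral=True)   # ephemeral node, removed when the session ends
data, stat = zk.get('/demo/node1')
print(data, stat.version)

@zk.ChildrenWatch('/demo')                           # re-invoked every time the children of /demo change
def watch_children(children):
    print('children are now: %s' % children)

zk.stop()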
python操作elasticsearch
http://elasticsearch-py.readthedocs.io/en/master/
from datetime import datetime
from elasticsearch import Elasticsearch
es = Elasticsearch(["host1", "host2"])
doc = {
'author': 'kimchy',
'text': 'Elasticsearch: cool. bonsai cool.',
'timestamp': datetime.now(),
}
res = es.index(index="live-", doc_type='tweet', id=1, body=doc)
print(res['created'])
res = es.get(index="live-", doc_type='tweet', id=1)
print(res['_source'])
es.indices.refresh(index="live-")
res = es.search(index="live-", body={"query": {"match_all": {}}})
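When loading many documents, the bulk helper is much faster than calling es.index() in a loop; a sketch under the same index/doc_type assumptions as above (field values are made up):
from elasticsearch import Elasticsearch, helpers

es = Elasticsearch(["host1", "host2"])
actions = [
    {"_index": "live-", "_type": "tweet",
     "_source": {"author": "user%d" % i, "text": "message %d" % i}}
    for i in range(1000)
]
ok, errors = helpers.bulk(es, actions)   # returns (number successfully indexed, list of errors)
print(ok, errors)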
7 http客户端
urllib2 [网络资源访问]
import urllib2
response = urllib2.urlopen('http://baidu.com')
print response.geturl() # url
headers = response.info()
print headers # web页面头部信息
print headers['date'] # 头部信息中的时间
date = response.read() # 返回页面所有信息[字符串]
# date = response.readlines() # 返回页面所有信息[列表]
for i in urllib2.urlopen('http://qq.com'): # 可直接迭代
print i,
下载文件
#!/usr/bin/env python
#encoding:utf8
import urllib2
url = 'http://www.01happy.com/wp-content/uploads/2012/09/bg.png'
file("./pic/%04d.png" % i, "wb").write(urllib2.urlopen(url).read())
抓取网页解析指定内容
#!/usr/bin/env python
#encoding:utf8
import urllib2
import urllib
import random
from bs4 import BeautifulSoup
url='http://www.aaammm.com/aaa/'
ua=["Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36"]
browser = random.choice(ua)
req_header = {'User-Agent':browser,
'Accept':'text/html;q=0.9,*/*;q=0.8',
'Cookie':'BAIDUID=4C8274B52CFB79DEB4FBA9A7EC76A1BC:FG=1; BDUSS=1dCdU1WNFdxUll0R09XcnBZTkRrVVVNbWVnSkRKSVRPeVljOUswclBoLUNzVEpVQVFBQUFBJCQAAAAAAAAAAAEAAADEuZ8BcXVhbnpob3U3MjIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIIIkC1SCJAtUY; BD_UPN=123143; BD_HOME=1', # 添真实登陆后的Cookie 谷歌浏览器[F12 Network Documents Headers]
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Connection':'close',
}
#data = urllib.urlencode({'name':'xuesong','id':'30' }) # urllib 的处理参数的方法,可以再urllib2中使用
data = urllib2.quote("pgv_ref=im.perinfo.perinfo.icon&rrr=pppp")
req_timeout = 10
try:
req = urllib2.Request(url, data=data, headers=req_header) # GET when data is None, POST otherwise
html = urllib2.urlopen(req, timeout=req_timeout).read()
except urllib2.HTTPError as err:
print str(err)
except:
print "timeout"
print(html)
# 百度带Cookie后查看自己的用户
#for i in html.split('\n'):
# if 'bds.comm.user=' in i:
# print i
soup = BeautifulSoup(html)
for i in soup.find_all(target="_blank",attrs={"class": "usr-pic"}): # 条件看情况选择
if i.img:
print(i.get('href'))
模拟浏览器访问web页面 python3
#! /usr/bin/env python
# -*- coding=utf-8 -*-
import urllib.request
url = "http://www.baidu.com"
# AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1)',
'Accept':'text/html;q=0.9,*/*;q=0.8',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Connection':'close',
'Referer':None #注意如果依然不能抓取的话,这里可以设置抓取网站的host
}
opener = urllib.request.build_opener()
opener.addheaders = [headers]
data = opener.open(url).read()
print(data)
requests [替代urllib2]
# Requests是一个Python的HTTP客户端库
# 官方中文文档 http://cn.python-requests.org/zh_CN/latest/user/quickstart.html#id2
# 安装: sudo pip install requests
import requests
# 使用 logging 库时忽略 requests 库的日志
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# get方法提交表单
url = r'http://dict.youdao.com/search?le=eng&q={0}'.format(word.strip())
r = requests.get(url,timeout=2)
# get方法带参数 http://httpbin.org/get?key=val
payload = {'key1': 'value1', 'key2': 'value2'}
r = requests.get("http://httpbin.org/get", params=payload)
# post方法提交表单
QueryAdd='http://www.anti-spam.org.cn/Rbl/Query/Result'
r = requests.post(url=QueryAdd, data={'IP':'211.211.54.54'})
# 定制请求头post请求
payload = {'some': 'data'}
headers = {'content-type': 'application/json'}
r = requests.post(url, data=json.dumps(payload), headers=headers)
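Newer requests versions (2.4.2+) can serialize the JSON body themselves and set the Content-Type header, so the snippet above collapses to:
r = requests.post(url, json=payload)   # equivalent to data=json.dumps(payload) plus the json content-type header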
# https 需登录加auth
r = requests.get('https://baidu.com', auth=('user', 'pass'))
if r.ok: # 判断请求是否正常
print r.url # u'http://httpbin.org/get?key2=value2&key1=value1'
print r.status_code # 状态码
print r.content # 获取到的原始内容 可使用 BeautifulSoup4 解析处理判定结果
print r.text # 把原始内容转unicode编码
print r.headers # 响应头
print r.headers['content-type'] # 网页头信息 不存在为None
print r.cookies['example_cookie_name'] # 查看cookie
print r.history # 追踪重定向 [<Response [301]>] 开启重定向 allow_redirects=True
获取JSON
r = requests.get('https://github.com/timeline.json')
r.json()
获取图片
from PIL import Image
from StringIO import StringIO
i = Image.open(StringIO(r.content))
发送cookies到服务器
url = 'http://httpbin.org/cookies'
cookies = dict(cookies_are='working')
r = requests.get(url, cookies=cookies)
r.text '{"cookies": {"cookies_are": "working"}}'
在同一个Session实例发出的所有请求之间保持cookies
s = requests.Session()
s.get('http://httpbin.org/cookies/set/sessioncookie/123456789')
r = s.get("http://httpbin.org/cookies")
print r.text
会话对象能够跨请求保持某些参数
s = requests.Session()
s.auth = ('user', 'pass')
s.headers.update({'x-test': 'true'})
s.get('http://httpbin.org/headers', headers={'x-test2': 'true'}) # both 'x-test' and 'x-test2' are sent
ssl证书验证
requests.get('https://github.com', verify=True)
requests.get('https://kennethreitz.com', verify=False) # 忽略证书验证
requests.get('https://kennethreitz.com', cert=('/path/server.crt', '/path/key')) # 本地指定一个证书 正确 <Response [200]> 错误 SSLError
流式上传
with open('massive-body') as f:
requests.post('http://some.url/streamed', data=f)
流式请求
import requests
import json
r = requests.post('https://stream.twitter.com/1/statuses/filter.json',
data={'track': 'requests'}, auth=('username', 'password'), stream=True)
for line in r.iter_lines():
if line: # filter out keep-alive new lines
print json.loads(line)
自定义身份验证
from requests.auth import AuthBase
class PizzaAuth(AuthBase):
"""Attaches HTTP Pizza Authentication to the given Request object."""
def __init__(self, username):
# setup any auth-related data here
self.username = username
def __call__(self, r):
# modify and return the request
r.headers['X-Pizza'] = self.username
return r
requests.get('http://pizzabin.org/admin', auth=PizzaAuth('kenneth'))
基本身份认证
from requests.auth import HTTPBasicAuth
requests.get('https://api.github.com/user', auth=HTTPBasicAuth('user', 'pass'))
摘要式身份认证
from requests.auth import HTTPDigestAuth
url = 'http://httpbin.org/digest-auth/auth/user/pass'
requests.get(url, auth=HTTPDigestAuth('user', 'pass'))
代理
import requests
proxies = {
"http": "http://10.10.1.10:3128",
# "http": "http://user:pass@10.10.1.10:3128/", # 用户名密码
"https": "http://10.10.1.10:1080",
}
requests.get("http://example.org", proxies=proxies)
#也可以设置环境变量之间访问
export HTTP_PROXY="http://10.10.1.10:3128"
export HTTPS_PROXY="http://10.10.1.10:1080"
requests.session
import requests
import time
from bs4 import BeautifulSoup
session = requests.session()
login_url = "http://deploy.ixiaochuan.cn/login"
res_start = session.get(url=login_url)
bs = BeautifulSoup(res_start.text, "html.parser")
a = bs.select("#csrf_token")[0]
token = a.attrs.get("value")
login_data = {"username": (None, "weiqiang"), "password": (None, "Onei"), "submit": (None, "Login"),
"csrf_token": (None, token)}
res = session.post(url=login_url, files=login_data, allow_redirects=False)
print("login success")
BeautifulSoup [html\xml解析器]
# BeautifulSoup中文官方文档
# http://www.crummy.com/software/BeautifulSoup/bs3/documentation.zh.html
# http://www.crummy.com/software/BeautifulSoup/bs4/doc/index.zh.html
# Beautiful Soup将复杂HTML文档转换成一个复杂的树形结构,每个节点都是Python对象,所有对象可以归纳为4种: Tag , NavigableString , BeautifulSoup , Comment
导入模块
from BeautifulSoup import BeautifulSoup # For processing HTML 版本3.0 已停止更新
from BeautifulSoup import BeautifulStoneSoup # For processing XML
import BeautifulSoup # To get everything
from bs4 import BeautifulSoup # 版本4.0 bs4 安装: pip install BeautifulSoup4
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc) # 解析html文本 可以是 requests 提交返回的页面 results.content
print(soup.prettify()) # 输出解析后的结构
print(soup.title) # 指定标签内容
print(soup.title.name) # 标签名
print(soup.title.string) # 标签内容
print(soup.title.parent.name) # 上层标签名
print(soup.p) # <p class="title"><b>The Dormouse's story</b></p>
print(soup.p['class']) # u'title' class属性值
print(soup.a) # 找到第一个a标签的标签行
print(soup.find_all('a',limit=2)) # 找到a标签的行,最多为limit个
print(soup.find(id="link3")) # 标签内id为link3的标签行
print(soup.get_text()) # 从文档中获取所有文字内容
soup.find_all("a", text="Elsie") # 从文档中搜索关键字
soup.find(text=re.compile("sisters")) # 从文档中正则搜索关键字
soup.find_all("a", class_="sister") # 按CSS搜索
soup.find_all("table", attrs={"class": "status"}, id='link2', href=re.compile("elsie")) # combining several search conditions
for i in soup.find_all('a',attrs={"class": "usr-pic"}): # 循环所有a标签的标签行
if i.a.img:
print(i.a.img.get("src")) # 取出当前a标签中的连接
Tag
# find_all 后循环的值是 Tag 不是字符串 不能直接截取
tag.text # 文本
tag.name
tag.name = "blockquote" # 查找name为 blockquote 的
tag['class']
tag.attrs # 按熟悉查找
tag['class'] = 'verybold'
del tag['class'] # 删除
print(tag.get('class')) # 打印属性值
print(i.get('href')) # 打印连接
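A small end-to-end sketch combining requests and BeautifulSoup to list every link on a page (the URL is a placeholder):
import requests
from bs4 import BeautifulSoup

r = requests.get('http://example.com', timeout=5)   # placeholder URL
soup = BeautifulSoup(r.content, 'html.parser')      # naming the parser avoids the bs4 warning
for a in soup.find_all('a'):
    href = a.get('href')
    if href:
        print(href)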
cookielib [保留cookie登录页面]
ck = cookielib.CookieJar() # 通过 这个就可以实现请求带过去的COOKIE与发送回来的COOKIE值了。
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(ck)) # 获取到COOKIE
urllib2.install_opener(opener) # 此句设置urllib2的全局opener
content = urllib2.urlopen(url).read()
登录cacti取图片
#encoding:utf8
import urllib2
import urllib
import cookielib
def renrenBrower(url,user,password):
#查找form标签中的action提交地址
login_page = "http://10.10.10.19/cacti/index.php"
try:
#获得一个cookieJar实例
cj = cookielib.CookieJar()
#cookieJar作为参数,获得一个opener的实例
opener=urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
#伪装成一个正常的浏览器,避免有些web服务器拒绝访问
opener.addheaders = [('User-agent','Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')]
#生成Post数据,含有登陆用户名密码,所有表单内的input中name值
data = urllib.urlencode({"action":"login","login_username":user,"login_password":password})
#以post的方法访问登陆页面,访问之后cookieJar会自定保存cookie
opener.open(login_page,data)
#以带cookie的方式访问页面
op=opener.open(url)
#读取页面源码
data=op.read()
#将图片写到本地
#file("1d.png" , "wb").write(data)
return data
except Exception,e:
print str(e)
print renrenBrower("http://10.10.10.19/cacti/graph_image.php?local_graph_id=1630&rra_id=0&view_type=tree&graph_start=1397525517&graph_end=1397611917","admin","admin")
例子2
import urllib, urllib2, cookielib
import os, time
headers = []
def login():
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
login_url = r'http://zhixing.bjtu.edu.cn/member.php?mod=logging&action=login&loginsubmit=yes&infloat=yes&lssubmit=yes&inajax=1'
login_data = urllib.urlencode({'cookietime': '2592000', 'handlekey': 'ls', 'password': 'xxx',
'quickforward': 'yes', 'username': 'GuoYuan'})
opener.addheaders = [('Host', 'zhixing.bjtu.edu.cn'),
('User-Agent', 'Mozilla/5.0 (Ubuntu; X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0'),
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
('Accept-Language', 'en-us,en;q=0.5'),
('Accept-Encoding', 'gzip, deflate'),
('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'),
('Connection', 'keep-alive'),
('Referer', 'http://zhixing.bjtu.edu.cn/forum.php'),]
opener.open(login_url, login_data)
return opener
if __name__ == '__main__':
opener = login()
url = r'http://zhixing.bjtu.edu.cn/forum.php?mod=topicadmin&action=moderate&optgroup=2&modsubmit=yes&infloat=yes&inajax=1'
data = {'fid': '601', 'formhash': '0cdd1596', 'frommodcp': '', 'handlekey': 'mods',
'listextra': 'page%3D62', 'moderate[]': '496146', 'operations[]': 'type', 'reason': '...',
'redirect': r'http://zhixing.bjtu.edu.cn/thread-496146-1-1.html', 'typeid': '779'}
data2 = [(k, v) for k,v in data.iteritems()]
cnt = 0
for tid in range(493022, 496146 + 1):
cnt += 1
if cnt % 20 == 0: print
print tid,
data2.append(('moderate[]', str(tid)))
if cnt % 40 == 0 or tid == 496146:
request = urllib2.Request(url=url, data=urllib.urlencode(data2))
print opener.open(request).read()
data2 = [(k, v) for k,v in data.iteritems()]
httplib [http协议的客户端]
import httplib
conn3 = httplib.HTTPConnection('www.baidu.com',80,True,10)
aiohttp [检索网页的客户端]
# 需要python3.3+
# http://aiohttp.readthedocs.org/en/v0.12.0/
import aiohttp
def get_body(url):
response = yield from aiohttp.request('GET', url)
return (yield from response.read())
response = yield from aiohttp.request('GET', 'http://python.org')
body = yield from response.read()
print(body)
# 用 asyncio 配合协程抓取页面
yield from asyncio.wait_for(request('GET', url), 10)
http_server
import asyncio
from aiohttp import web
@asyncio.coroutine
def handle(request):
name = request.match_info.get('name', "Anonymous")
text = "Hello, " + name
return web.Response(body=text.encode('utf-8'))
@asyncio.coroutine
def init(loop):
app = web.Application(loop=loop)
app.router.add_route('GET', '/{name}', handle)
srv = yield from loop.create_server(app.make_handler(),
'127.0.0.1', 8080)
print("Server started at http://127.0.0.1:8080")
return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
查看网页图片尺寸类型
#将图片读入内存
#!/usr/bin/env python
#encoding=utf-8
import cStringIO, urllib2, Image
url = 'http://www.01happy.com/wp-content/uploads/2012/09/bg.png'
file = urllib2.urlopen(url)
tmpIm = cStringIO.StringIO(file.read())
im = Image.open(tmpIm)
print im.format, im.size, im.mode
爬虫
#!/usr/bin/env python
#encoding:utf-8
#sudo pip install BeautifulSoup
import requests
from BeautifulSoup import BeautifulSoup
import re
baseurl = 'http://blog.sina.com.cn/s/articlelist_1191258123_0_1.html'
r = requests.get(baseurl)
for url in re.findall('<a.*?</a>', r.content, re.S):
if url.startswith('<a title='):
with open(r'd:/final.txt', 'ab') as f:
f.write(url + '\n')
linkfile = open(r'd:/final.txt', 'rb')
soup = BeautifulSoup(linkfile)
for link in soup.findAll('a'):
#print link.get('title') + ': ' + link.get('href')
ss = requests.get(link.get('href'))
for content in re.findall('<div id="sina_keyword_ad_area2" class="articalContent ">.*?</div>', ss.content, re.S):
with open(r'd:/myftp/%s.txt'%link.get('title').strip('<>'), 'wb') as f:
f.write(content)
print '%s has been copied.' % link.get('title')
反垃圾邮件提交申诉
#很遗憾,反垃圾邮件联盟改版后加了验证码
#!/usr/bin/env python
#encoding:utf-8
import requests
import re
IpList=['113.212.91.25','113.212.91.23']
QueryAdd='http://www.anti-spam.org.cn/Rbl/Query/Result'
ComplaintAdd='http://www.anti-spam.org.cn/Rbl/Getout/Submit'
data = {
'CONTENT':'''我们是一家正规的XXX。xxxxxxx。恳请将我们的发送服务器IP移出黑名单。谢谢!
处理措施:
1.XXXX。
2.XXXX。''',
'CORP':'abc.com',
'WWW':'www.abc.cm',
'NAME':'def',
'MAIL':'def@163.com.cn',
'TEL':'010-50000000',
'LEVEL':'0',
}
for Ip in IpList:
query = requests.post(url=QueryAdd, data={'IP':Ip}) # 黑名单查询
if query.ok:
if re.findall(u'\u7533\u8bc9\u8131\u79bb', query.text, re.S): # 查找关键字 申诉脱离 既表明在黑名单中
data['IP']=Ip
complaint = requests.post(url=ComplaintAdd, data=data) # 提交申诉
if complaint.ok:
if re.findall(u'\u60a8\u7684\u9ed1\u540d\u5355\u8131\u79bb\u7533\u8bf7\u5df2\u63d0\u4ea4', complaint.text, re.S):
status='申请提交'
elif re.findall(u'\u8131\u79bb\u7533\u8bf7\u5df2\u88ab\u4ed6\u4eba\u63d0\u4ea4', complaint.text, re.S):
status='重复提交'
elif re.findall(u'\u7533\u8bf7\u7531\u4e8e\u8fd1\u671f\u5185\u6709\u88ab\u62d2\u7edd\u7684\u8bb0\u5f55', complaint.text, re.S):
status='近期拒绝'
else:
status='异常'
else:
status='正常'
print '%s %s' %(Ip,status)
有道词典
#!/usr/bin/env python
import requests
from bs4 import BeautifulSoup
# bs4安装: pip install BeautifulSoup4
def youdao(word):
url = r'http://dict.youdao.com/search?le=eng&q={0}'.format(word.strip())
r = requests.get(url)
if r.ok:
soup = BeautifulSoup(r.content)
div = soup.find_all('div', class_='trans-container')[:1] # find_all是bs4的方法
ul = BeautifulSoup(str(div[0]))
li = ul.find_all('li')
for mean in li:
print mean.text
def query():
print('Created by @littlepy, QQ:185635687')
while True:
word = raw_input('>>>')
youdao(word)
if __name__ == '__main__':
query()
python启动http服务提供访问或下载
python -m SimpleHTTPServer 9900
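The Python 3 equivalent uses the http.server module (SimpleHTTPServer was merged into it):
python3 -m http.server 9900
python3 -m http.server 9900 --bind 127.0.0.1 # serve on one interface only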
8 并发
#线程安全/竞争条件,锁/死锁检测,线程池,生产消费模型,伪并发,微线程,协程
#Stackless Python 是Python编程语言的一个增强版本,它使程序员从基于线程的编程方式中获得好处,并避免传统线程所带来的性能与复杂度问题。Stackless为 Python带来的微线程扩展,是一种低开销、轻量级的便利工具
Queue队列
import Queue
q = Queue.Queue(3)
q.put('a', True, 5) # True等待超时时间, False不等待
if q.full(): # 队列满了返回True,反之False
q.qsize() # 队列长度
workQueue.queue.clear() # 清空队列
q.get(True,5) # True等待超时时间, False不等待
threading多线程
thread
start_new_thread(function, args, kwargs=None) # spawn a new thread that runs function(*args)
allocate_lock() # 分配一个LockType类型的锁对象
exit() # 让线程退出
acquire(wait=None) # 尝试获取锁对象
locked() # 如果获取了锁对象返回True
release() # 释放锁
thread例子
#!/usr/bin/env python
#thread_test.py
#不支持守护进程
import thread
from time import sleep,ctime
loops = [4,2]
def loop(nloop,nsec,lock):
print 'start loop %s at:%s' % (nloop,ctime())
sleep(nsec)
print 'loop %s done at: %s' % (nloop, ctime())
lock.release() # 分配已获得的锁,操作结束后释放相应的锁通知主线程
def main():
print 'starting at:',ctime()
locks = []
nloops = range(len(loops))
for i in nloops:
lock = thread.allocate_lock() # 创建一个锁
lock.acquire() # 调用各个锁的acquire()函数获得锁
locks.append(lock) # 把锁放到锁列表locks中
for i in nloops:
thread.start_new_thread(loop,(i,loops[i],locks[i])) # 创建线程
for i in nloops:
while locks[i].locked():pass # 等待全部解锁才继续运行
print 'all DONE at:',ctime()
if __name__ == '__main__':
main()
thread例子1
#coding=utf-8
import thread,time,os
def f(name):
i =3
while i:
time.sleep(1)
print name
i -= 1
# os._exit() 会把整个进程关闭
os._exit(22)
if __name__ == '__main__':
thread.start_new_thread(f,("th1",))
while 1:
pass
os._exit(0)
threading
Thread # 表示一个线程的执行的对象
start() # 开始线程的执行
run() # 定义线程的功能的函数(一般会被子类重写)
join(timeout=None) # 允许主线程等待线程结束,程序挂起,直到线程结束;如果给了timeout,则最多等待timeout秒.
getName() # 返回线程的名字
setName(name) # 设置线程的名字
isAlive() # 布尔标志,表示这个线程是否还在运行中
isDaemon() # 返回线程的daemon标志
setDaemon(daemonic) # 后台线程,把线程的daemon标志设置为daemonic(一定要在调用start()函数前调用)
# 默认主线程在退出时会等待所有子线程的结束。如果希望主线程不等待子线程,而是在退出时自动结束所有的子线程,就需要设置子线程为后台线程(daemon)
Lock # 锁原语对象
Rlock # 可重入锁对象.使单线程可以在此获得已获得了的锁(递归锁定)
Condition # 条件变量对象能让一个线程停下来,等待其他线程满足了某个条件.如状态改变或值的改变
Event # 通用的条件变量.多个线程可以等待某个事件的发生,在事件发生后,所有的线程都会被激活
Semaphore # 为等待锁的线程提供一个类似等候室的结构
BoundedSemaphore # 与Semaphore类似,只是不允许超过初始值
Timer # like Thread, but it waits a given interval before it starts running
activeCount() # 当前活动的线程对象的数量
currentThread() # 返回当前线程对象
enumerate() # 返回当前活动线程的列表
settrace(func) # 为所有线程设置一个跟踪函数
setprofile(func) # 为所有线程设置一个profile函数
threading例子1
#!/usr/bin/env python
#encoding:utf8
import threading
from Queue import Queue
from time import sleep,ctime
class ThreadFunc(object):
def __init__(self,func,args,name=''):
self.name=name
self.func=func # loop
self.args=args # (i,iplist[i],queue)
def __call__(self):
apply(self.func,self.args) # 函数apply() 执行loop函数并传递元组参数
def loop(nloop,ip,queue):
print 'start',nloop,'at:',ctime()
queue.put(ip)
sleep(2)
print 'loop',nloop,'done at:',ctime()
if __name__ == '__main__':
threads = []
queue = Queue()
iplist = ['192.168.1.2','192.168.1.3','192.168.1.4','192.168.1.5','192.168.1.6','192.168.1.7','192.168.1.8']
nloops = range(len(iplist))
for i in nloops:
t = threading.Thread(target=ThreadFunc(loop,(i,iplist[i],queue),loop.__name__))
threads.append(t)
for i in nloops:
threads[i].start()
for i in nloops:
threads[i].join()
for i in nloops:
print queue.get()
threading例子2
#!/usr/bin/env python
#encoding:utf8
from Queue import Queue
import random,time,threading
class Producer(threading.Thread):
def __init__(self, t_name, queue):
threading.Thread.__init__(self, name=t_name)
self.data=queue
def run(self):
for i in range(5):
print "%s: %s is producing %d to the queue!\n" %(time.ctime(), self.getName(), i)
self.data.put(i)
self.data.put(i*i)
time.sleep(2)
print "%s: %s finished!" %(time.ctime(), self.getName())
class Consumer(threading.Thread):
def __init__(self, t_name, queue):
threading.Thread.__init__(self, name=t_name)
self.data=queue
def run(self):
for i in range(10):
val = self.data.get()
print "%s: %s is consuming. %d in the queue is consumed!\n" %(time.ctime(), self.getName(), val)
print "%s: %s finished!" %(time.ctime(), self.getName())
if __name__ == '__main__':
queue = Queue()
producer = Producer('Pro.', queue)
consumer = Consumer('Con.', queue)
producer.start()
consumer.start()
producer.join()
consumer.join()
threading例子3
# 启动线程后自动执行 run函数其他不可以
import threading
import time
class Th(threading.Thread):
def __init__(self,name):
threading.Thread.__init__(self)
self.t_name=name
self.daemon = True # 默认为false,让主线程等待处理完成
def run(self):
time.sleep(1)
print "this is " + self.t_name
if __name__ == '__main__':
thread1 = Th("Th_1")
thread1.start()
threading例子4
import threading
import time
class Th(threading.Thread):
def __init__(self,thread_name):
threading.Thread.__init__(self)
self.setName(thread_name)
def run(self):
threadLock.acquire()
print self.getName()
for i in range(3):
time.sleep(1)
print str(i)
print self.getName() + " is over"
threadLock.release()
if __name__ == '__main__':
threadLock = threading.Lock()
thread1 = Th("Th_1")
thread2 = Th("Th_2")
thread1.start()
thread2.start()
后台线程
import threading
import time,random
class MyThread(threading.Thread):
def run(self):
wait_time=random.randrange(1,10)
print "%s will wait %d seconds" % (self.name, wait_time)
time.sleep(wait_time)
print "%s finished!" % self.name
if __name__=="__main__":
for i in range(5):
t = MyThread()
t.setDaemon(True) # 设置为后台线程,主线程完成时不等待子线程完成就结束
t.start()
threading控制最大并发_查询日志中IP信息
#!/usr/bin/env python
#coding:utf-8
import urllib2
import json
import threading
import time
'''
by:某大牛
QQ:185635687
这个是多线程并发控制. 如果要改成多进程,只需把threading 换成 mulitprocessing.Process , 对, 就是换个名字而已.
'''
#获取ip 及其出现次数
def ip_dic(file_obj, dic):
for i in file_obj:
if i:
ip=i.split('-')[0].strip()
if ip in dic.keys():
dic[ip]=dic[ip] + 1
else:
dic[ip]=1
return dic.iteritems()
#目标函数
def get_data(url, ipcounts):
data=urllib2.urlopen(url).read()
datadict=json.loads(data)
fdata = u"ip:%s---%s,%s,%s,%s,%s" %(datadict["data"]["ip"],ipcounts,datadict["data"]["country"],datadict["data"]["region"],datadict["data"]["city"],datadict["data"]["isp"])
print fdata
#多线程
def threads(iters):
thread_pool = []
for k in iters:
url = "http://ip.taobao.com/service/getIpInfo.php?ip="
ipcounts = k[1]
url = (url + k[0]).strip()
t = threading.Thread(target=get_data, args=(url, ipcounts))
thread_pool.append(t)
return thread_pool
#控制多线程
def startt(t_list, max,second):
l = len(t_list)
n = max
while l > 0:
if l > max:
nl = t_list[:max]
t_list = t_list[max:]
for t in nl:
t.start()
time.sleep(second)
for t in nl:
t.join()
print '*'*15, str(n)+ ' ip has been queried'+'*'*15
n += max
l = len(t_list)
continue
elif l <= max:
nl = t_list
for t in nl:
t.start()
for t in nl:
t.join()
print '>>> Totally ' + str(n+l ) + ' ip has been queried'
l = 0
if __name__ =="__main__":
dic={}
with open('access.log') as file_obj:
it = ip_dic(file_obj, dic)
t_list= threads(it)
startt(t_list, 15, 1)
多线程取队列
#!/usr/bin/python
import Queue
import threading
import time
exitFlag = 0
class myThread(threading.Thread):
def __init__(self, threadID, name, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
def run(self):
print "Starting " + self.name
process_data(self.name, self.q)
print "Exiting " + self.name
def process_data(threadName, q):
while not exitFlag: # 死循环等待
queueLock.acquire()
if not q.empty(): # 判断队列是否为空
data = q.get()
print "%s processing %s" % (threadName, data)
queueLock.release()
time.sleep(1)
nameList = ["One", "Two", "Three", "Four", "Five"]
queueLock = threading.Lock() # 锁与队列并无任何关联,其他线程也进行取锁操作的时候就会检查是否有被占用,有就阻塞等待解锁为止
workQueue = Queue.Queue(10)
threads = []
threadID = 1
# Create new threads
for threadID in range(100):
thread = myThread(threadID, 'tName%s' % threadID, workQueue)
thread.start()
threads.append(thread)
threadID += 1
# Fill the queue
queueLock.acquire()
for word in nameList:
workQueue.put(word)
queueLock.release()
# Wait for queue to empty
while not workQueue.empty(): # 死循环判断队列被处理完毕
pass
# Notify threads it's time to exit
exitFlag = 1
# Wait for all threads to complete
for t in threads:
t.join()
print "Exiting Main Thread"
Queue通用队列
q=Queue(size) # 创建大小size的Queue对象
qsize() # 返回队列的大小(返回时候,可能被其他进程修改,近似值)
empty() # 如果队列为空返回True,否则False
full() # 如果队列已满返回True,否则False
put(item, block=0) # put item on the queue; if block is true (non-zero), block until a slot is free
get(block=0) # 从队列中取一个对象,如果给了block(不为0),函数会一直阻塞到队列中有对象为止
get_nowait # 默认get阻塞,这个不阻塞
multiprocessing [多进程并发]
线程池
import urllib2
from multiprocessing.dummy import Pool as ThreadPool
urls=['http://www.baidu.com','http://www.sohu.com']
pool=ThreadPool(4) # 线程池
results=pool.map(urllib2.urlopen,urls)
pool.close()
pool.join()
进程并发
#!/usr/bin/env python
#encoding:utf8
from multiprocessing import Process
import time,os
def f(name):
time.sleep(1)
print 'hello ',name
print os.getppid() # 取得父进程ID
print os.getpid() # 取得进程ID
process_list = []
for i in range(10):
p = Process(target=f,args=(i,))
p.start()
process_list.append(p)
for j in process_list:
j.join()
进程池
#!/usr/bin/env python
#encoding:utf8
from multiprocessing import Pool
import time,os
def f(name):
time.sleep(1)
print 'hello ',name
print os.getppid()
print os.getpid()
process_list = []
pool = Pool(4)
res = pool.map(f, range(1,10))
pool.close()
pool.join()
Queue进程间通信
from multiprocessing import Process,Queue
import time
def f(name):
time.sleep(1)
q.put(['hello'+str(name)])
process_list = []
q = Queue()
if __name__ == '__main__':
for i in range(10):
p = Process(target=f,args=(i,))
p.start()
process_list.append(p)
for j in process_list:
j.join()
for i in range(10):
print q.get()
Pipe管道 # 单项通信
from multiprocessing import Process,Pipe
import time
import os
def f(conn,name):
time.sleep(1)
conn.send(['hello'+str(name)])
print os.getppid(),'-----------',os.getpid()
process_list = []
parent_conn,child_conn = Pipe()
if __name__ == '__main__':
for i in range(10):
p = Process(target=f,args=(child_conn,i))
p.start()
process_list.append(p)
for j in process_list:
j.join()
for p in range(10):
print parent_conn.recv()
进程间同步
#加锁,使某一时刻只有一个进程,其他在调用同一个锁就会被阻塞
from multiprocessing import Process,Lock
import time
import os
def f(name):
lock.acquire()
time.sleep(1)
print 'hello--'+str(name)
print os.getppid(),'-----------',os.getpid()
lock.release()
process_list = []
lock = Lock()
if __name__ == '__main__':
for i in range(10):
p = Process(target=f,args=(i,))
p.start()
process_list.append(p)
for j in process_list:
j.join()
共享内存 # 双向通信
# 通过使用Value或者Array把数据存储在一个共享的内存表中
# 'd'和'i'参数是num和arr用来设置类型,d表示一个双精浮点类型,i表示一个带符号的整型。
from multiprocessing import Process,Value,Array
import time
import os
def f(n,a,name):
time.sleep(1)
n.value = name * name
for i in range(len(a)):
a[i] = -i
process_list = []
if __name__ == '__main__':
num = Value('d',0.0)
arr = Array('i',range(10))
for i in range(10):
p = Process(target=f,args=(num,arr,i))
p.start()
process_list.append(p)
for j in process_list:
j.join()
print num.value
print arr[:]
manager
# 比共享内存灵活,但缓慢
# 支持list,dict,Namespace,Lock,Semaphore,BoundedSemaphore,Condition,Event,Queue,Value,Array
from multiprocessing import Process,Manager
import time
import os
def f(d,name):
time.sleep(1)
d[name] = name * name
print d
process_list = []
if __name__ == '__main__':
manager = Manager()
d = manager.dict()
for i in range(10):
p = Process(target=f,args=(d,i))
p.start()
process_list.append(p)
for j in process_list:
j.join()
print d
最大并发数
import multiprocessing
import time,os
result = []
def run(h):
print 'threading:' ,h,os.getpid()
p = multiprocessing.Pool(processes=20)
for i in range(100):
result.append(p.apply_async(run,(i,)))
p.close()
for res in result:
res.get(timeout=5)
gevent [轻量级协程]
# 在gevent中用到的主要模式是Greenlet, 它是以C扩展模块形式接入Python的轻量级协程。 Greenlet全部运行在主程序操作系统进程的内部,但它们被协作式地调度。
# http://xlambda.com/gevent-tutorial/
锁的使用
# 同时允许多个协程操作对象的锁,通过互斥访问,保证资源只在程序上下文被单次使用
from gevent import sleep
from gevent.pool import Pool
from gevent.coros import BoundedSemaphore
sem = BoundedSemaphore(2) # 超过2就会阻塞等待
def worker1(n):
sem.acquire()
print('Worker %i acquired semaphore' % n)
sleep(0)
sem.release()
print('Worker %i released semaphore' % n)
def worker2(n):
with sem:
print('Worker %i acquired semaphore' % n)
sleep(0)
print('Worker %i released semaphore' % n)
pool = Pool()
pool.map(worker1, xrange(0,2))
pool.map(worker2, xrange(3,6))
事件
# Event 阻塞事件
import gevent
from gevent.event import Event
evt = Event()
def setter():
'''After 3 seconds, wake all threads waiting on the value of evt'''
print('A: Hey wait for me, I have to do something')
gevent.sleep(3)
print("Ok, I'm done")
evt.set() # 表示事件完成
def waiter():
'''After 3 seconds the get call will unblock'''
print("I'll wait for you")
evt.wait() # 阻塞等待事件完成
print("It's about time")
gevent.joinall([
gevent.spawn(setter),
gevent.spawn(waiter),
gevent.spawn(waiter),
gevent.spawn(waiter),
gevent.spawn(waiter),
gevent.spawn(waiter)
])
# AsyncResult 可传值的事件
import gevent
from gevent.event import AsyncResult
a = AsyncResult()
def setter():
gevent.sleep(3)
a.set('Hello!') # 事件传值
def waiter():
"""
After 3 seconds the get call will unblock after the setter
puts a value into the AsyncResult.
"""
print(a.get()) # 获取时间值
gevent.joinall([
gevent.spawn(setter),
gevent.spawn(waiter),
])
队列
#/usr/local/python
#encoding:utf8
import gevent
from gevent.pool import Pool
from gevent.coros import BoundedSemaphore
from gevent.queue import Queue, Empty
import os
tasks = Queue(maxsize=30) # 队列 超过30引发 gevent.hub.LoopExit
tasks1 = Queue()
def boss():
print '放队列任务'
for i in xrange(1,25):
tasks.put(i)
def worker1(n):
print len(pool)
while not tasks.empty(): # 判断队列是否为空
task = tasks.get() # 获取队列内容
tasks1.put(os.popen('id').read())
print('Worker %s got task %s' % (n, task))
gevent.sleep(0) # 放弃当前任务
def worker2(name):
try:
while True:
task = tasks1.get(timeout=2)
print '获取后释放:%s' % task
gevent.sleep(0)
except Empty: # 等待超时报错完成
print('Quitting time!')
gevent.spawn(boss).join() # 执行单次协程任务
pool = Pool(5) # 协程池大小
pool.map(worker1, xrange(0,20)) # 通过map方法把多个任务分发给池中的5个协程
gevent.joinall([ # 同时执行多个协程任务
gevent.spawn(worker2, 'steve'),
gevent.spawn(worker2, 'john'),
gevent.spawn(worker2, 'nancy'),
])
9 框架
flask [微型网络开发框架]
# http://dormousehole.readthedocs.org/en/latest/
# http://www.pythonhosted.org/Flask-Bootstrap/basic-usage.html#templates
# html放在 ./templates/ js放在 ./static/
#pip install Flask-Login
#pip install Flask-OpenID
#pip install Flask-WTF
#pip install flask-bootstrap
#pip install flask-sqlalchemy
#pip install flask-script
#pip install flask-migrate
request.args.get('page', 1) # 获取参数 ?page=1
request.json # 获取传递的整个json数据
request.form.get("host",'127') # 获取表单值
request.form.getlist('client') # 获取表单列表
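A tiny sketch showing the request helpers above inside a route (field names are made up):
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/echo', methods=['GET', 'POST'])
def echo():
    page = request.args.get('page', 1)         # query string ?page=1
    host = request.form.get('host', '127')     # form field with a default
    clients = request.form.getlist('client')   # repeated form field
    return jsonify(page=page, host=host, clients=clients)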
简单实例 # 接收数据和展示
import MySQLdb as mysql
from flask import Flask, request
app = Flask(__name__)
db = mysql.connect("127.0.0.1", "user", "password", "dbname") # placeholder connection; MySQLdb.connect(host, user, passwd, db)
db.autocommit(True)
c = db.cursor()
"""
CREATE TABLE `statusinfo` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`hostname` varchar(32) NOT NULL,
`load` float(10) NOT NULL DEFAULT 0.00,
`time` int(15) NOT NULL,
`memtotal` int(15) NOT NULL,
`memusage` int(15) NOT NULL,
`memfree` int(15) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=161 DEFAULT CHARSET=utf8;
"""
@app.route("/collect", methods=["GET", "POST"])
def collect():
sql = ""
if request.method == "POST":
data = request.json # 获取传递的json
hostname = data["Host"]
load = data["LoadAvg"]
time = data["Time"]
memtotal = data["MemTotal"]
memusage = data["MemUsage"]
memfree = data["MemFree"]
try:
sql = "INSERT INTO `statusinfo` (`hostname`,`load`,`time`,`memtotal`,`memusage`,`memfree`) VALUES('%s', %s, %s, %s, %s, %s);" % (hostname, load,time,memtotal,memusage,memfree)
ret = c.execute(sql)
return 'ok'
except mysql.IntegrityError:
return 'errer'
@app.route("/show", methods=["GET", "POST"])
def show():
try:
hostname = request.form.get("hostname") # 获取表单方式的变量值
sql = "SELECT `load` FROM `statusinfo` WHERE hostname = '%s';" % (hostname)
c.execute(sql)
ones = c.fetchall()
return render_template("sysstatus.html", data=ones, sql = sql)
except:
print 'hostname null'
from flask import render_template
@app.route("/xxx/<name>")
def hello_xx(name):
return render_template("sysstatus.html", name='teach')
if __name__ == "__main__":
app.run(host="0.0.0.0", port=50000, debug=True)
Flask-SQLAlchemy
http://www.pythondoc.com/flask-sqlalchemy/queries.html#id2
http://docs.jinkan.org/docs/flask-sqlalchemy/models.html#id2
https://www.cnblogs.com/mosson/p/6257147.html
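The snippets below assume an app, a db handle and a User model roughly like this sketch (database URI and column sizes are placeholders):
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://user:password@127.0.0.1/testdb'  # placeholder URI
db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120), unique=True)

    def __init__(self, username, email):
        self.username = username
        self.email = email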
db.create_all() # 创建表
增加
admin = User('admin', 'admin@example.com')
db.session.add(admin)
db.session.add(guest)
db.session.commit()
查询
# 返回数组
users = User.query.all()
# 条件过滤 返回一个对象 不存在返回 返回none 像python传参数
peter = User.query.filter_by(username = 'peter').first()
# 条件过滤 像sql 可使用 ><
peter = User.query.filter(User.username == 'peter').first()
# 获取指定列的值
print peter.username
# 复杂查询 返回列表对象
User.query.filter(User.email.endswith('@example.com')).all()
# 对查询结果按指定列排序
User.query.order_by(User.username)
# 取前面的指定条数
User.query.limit(1).all()
# 通过主键来获取对象
User.query.get(1)
# 通配查询 ilike 忽略大小写
User.query.filter(User.username.ilike('online_%')).all()
User.query.filter(User.username.notilike('online_%')).all()
删除
user = User.query.get(id)
db.session.delete(user)
db.session.commit()
User.query.filter_by(id=123).delete()
User.query.filter(User.id == 123).delete()
改
db.session.query(Users).filter(Users.id > 2).update({"name" : "099"})
db.session.commit()
q = db.session.query(Toner)
q = q.filter(Toner.toner_id==1)
record = q.one()
record.toner_color = 'Azure Radiance'
db.session.flush()
连表
ret = session.query(Users, Favor).filter(Users.id == Favor.nid).all()
ret = session.query(Person).join(Favor).all()
ret = session.query(Person).join(Favor, isouter=True).all()
通配符
ret = session.query(Users).filter(Users.name.like('e%')).all()
ret = session.query(Users).filter(~Users.name.like('e%')).all()
排序
ret = session.query(Users).order_by(Users.name).all() # 正序
ret = session.query(Users).order_by(Users.name.desc()).all() # 倒序
ret = session.query(Users).order_by(Users.name.desc(), Users.id.asc()).all()
twisted [非阻塞异步服务器框架]
# 较老 推荐使用 协程框架 或 微线程框架
# 用来进行网络服务和应用程序的编程。虽然 Twisted Matrix 中有大量松散耦合的模块化组件,但该框架的中心概念还是非阻塞异步服务器这一思想。对于习惯于线程技术或分叉服务器的开发人员来说,这是一种新颖的编程风格,但它却能在繁重负载的情况下带来极高的效率。
pip install twisted
from twisted.internet import protocol, reactor, endpoints
class Echo(protocol.Protocol):
def dataReceived(self, data):
self.transport.write(data)
class EchoFactory(protocol.Factory):
def buildProtocol(self, addr):
return Echo()
endpoints.serverFromString(reactor, "tcp:1234").listen(EchoFactory())
reactor.run()
服务端
#!/usr/bin/env python
from twisted.application import service, internet
from txjsonrpc.netstring import jsonrpc
class Example(jsonrpc.JSONRPC):
"""An example object to be published."""
def jsonrpc_echo(self, x):
"""Return all passed args."""
return x
def jsonrpc_add(self, a, b):
"""Return sum of arguments."""
print "add", a, b
return a + b
factory = jsonrpc.RPCFactory(Example())
application = service.Application("Example JSON-RPC Server")
jsonrpcServer = internet.TCPServer(7080, factory)
jsonrpcServer.setServiceParent(application)
客户端
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.getcwd())
from twisted.internet import reactor
from txjsonrpc.netstring.jsonrpc import Proxy
def printValue(value):
print "Result: %s" % str(value)
reactor.stop()
def printError(error):
print 'error', error
reactor.stop()
proxy = Proxy('127.0.0.1', 7080)
proxy.callRemote('add', 3, 5).addCallbacks(printValue, printError)
reactor.run()
Celery [分布式任务队列]
# http://docs.jinkan.org/docs/celery/getting-started/introduction.html
pip install -U Celery
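A minimal sketch of a Celery app, assuming a Redis broker on localhost; start a worker with: celery -A tasks worker --loglevel=info, then enqueue work with add.delay(2, 3):
# tasks.py
from celery import Celery

app = Celery('tasks', broker='redis://127.0.0.1:6379/0')   # placeholder broker URL

@app.task
def add(x, y):
    return x + y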
tornado [极轻量级Web服务器框架]
# High scalability with epoll-based non-blocking IO; fast responses and thousands of concurrent connections, well suited to real-time web services (Tornado runs on its own IOLoop event loop, not on threads or gevent)
# http://www.tornadoweb.cn/documentation
# http://old.sebug.net/paper/books/tornado/#_2
# http://demo.pythoner.com/itt2zh/ch5.html
# 非阻塞方式生成子进程
# https://github.com/vukasin/tornado-subprocess
pip install tornado
self.get_argument() # 方法来获取查询字符串参数,以及解析 POST 的内容
self.request.arguments # 所有的 GET 或 POST 的参数
self.request.files # 所有通过 multipart/form-data POST 请求上传的文件
self.request.path # 请求的路径( ? 之前的所有内容)
self.request.headers # 请求的开头信息
callback # 执行完成后执行回调函数
@tornado.web.asynchronous # 非阻塞异步装饰器
self.finish() # 使用非阻塞异步 必须调用 self.finish() 已完成 HTTTP 请求
# 异步 HTTP 客户端 两种模式 默认 SimpleAsyncHTTPClient 如果要修改为 CurlAsyncHTTPClient
AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient')
import tornado.ioloop
import tornado.web
import tornado.httpclient
import json
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
def post(self):
self.set_header("Content-Type", "text/plain")
self.write("You wrote " + self.get_argument("message"))
class Template(tornado.web.RequestHandler):
def get(self):
items = ["Item 1", "Item 2", "Item 3"]
self.render("template.html", title="My title", items=items)
class urlhttp(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
http = tornado.httpclient.AsyncHTTPClient()
http.fetch("http://friendfeed-api.com/v2/feed/bret", callback=self.on_response)
def on_response(self, response):
if response.error: raise tornado.web.HTTPError(500)
jsondata = tornado.escape.json_decode(response.body)
print type(jsondata)
self.write(json.dumps(jsondata))
self.finish()
class StoryHandler(tornado.web.RequestHandler):
def get(self, story_id):
self.write("You requested the story " + story_id)
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
(r"/template", Template),
(r"/story/([0-9]+)", StoryHandler),
(r"/tapi", urlhttp),
])
if __name__ == "__main__":
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
Scrapy [web抓取框架]
# Python开发的一个快速,高层次的屏幕抓取和web抓取框架,用于抓取web站点并从页面中提取结构化的数据。Scrapy用途广泛,可以用于数据挖掘、监测和自动化测试。
pip install scrapy
from scrapy import Spider, Item, Field
class Post(Item):
title = Field()
class BlogSpider(Spider):
name, start_urls = 'blogspider', ['http://blog.scrapinghub.com']
def parse(self, response):
return [Post(title=e.extract()) for e in response.css("h2 a::text")]
scrapy runspider myspider.py
django [重量级web框架]
bottle [轻量级的Web框架]
stackless [增强版python]
微线程扩展,是一种低开销、轻量级的便利工具 避免传统线程所带来的性能与复杂度问题
greenlet [微线程/协程框架]
# 更加原始的微线程的概念,没有调度,或者叫做协程。这在你需要控制你的代码时很有用。你可以自己构造微线程的 调度器;也可以使用"greenlet"实现高级的控制流。例如可以重新创建构造器;不同于Python的构造器,我们的构造器可以嵌套的调用函数,而被嵌套的函数也可以 yield 一个值。
pip install greenlet
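A tiny sketch of explicit switching between two greenlets; there is no scheduler, control moves only where switch() is called:
from greenlet import greenlet

def task1():
    print('task1 step 1')
    gr2.switch()              # hand control to gr2
    print('task1 step 2')

def task2():
    print('task2 step 1')
    gr1.switch()              # hand control back to gr1

gr1 = greenlet(task1)
gr2 = greenlet(task2)
gr1.switch()                  # prints: task1 step 1, task2 step 1, task1 step 2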
asyncio [异步I/O协同]
# https://docs.python.org/3/library/asyncio.html
需要python3.4+
asyncio: 协同程序和事件循环。协同程序像是方法,但是它们可以在代码中的特定点暂停和继续。当在等待一个IO(比如一个HTTP请求),同时执行另一个请求的时候,可以用来暂停一个协同程序。我们使用关键字yield from来设定一个状态,表明我们需要一个协同程序的返回值。而事件循环则被用来安排协同程序的执行。
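A small sketch using the newer async/await syntax (Python 3.5+); two coroutines sleep concurrently on one event loop, standing in for real non-blocking IO:
import asyncio

async def fetch(name, delay):
    await asyncio.sleep(delay)              # placeholder for real non-blocking IO
    return '%s done after %ss' % (name, delay)

async def main():
    results = await asyncio.gather(fetch('a', 1), fetch('b', 2))
    print(results)

loop = asyncio.get_event_loop()
loop.run_until_complete(main())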
10例子
小算法
斐波那契
# yield turns fab() into a generator whose values can be iterated lazily, like a list in a for loop
def fab(max):
n, a, b = 0, 0, 1
while n < max:
yield b
a, b = b, a + b
n = n + 1
for n in fab(5):
print n
乘法口诀
#!/usr/bin/python
for i in range(1,10):
for j in range(1,i+1):
print j,'*',i,'=',j*i,
else:
print ''
最小公倍数
# 1-70的最小公倍数
def c(m,n):
a1=m
b1=n
r=n%m
while r!=0:
n=m
m=r
r=n%m
return (a1*b1)/m
d=1
for i in range(3,71,2):
d = c(d,i)
print d
排序算法
插入排序
def insertion_sort(sort_list):
iter_len = len(sort_list)
if iter_len < 2:
return sort_list
for i in range(1, iter_len):
key = sort_list[i]
j = i - 1
while j>=0 and sort_list[j]>key:
sort_list[j+1] = sort_list[j]
j -= 1
sort_list[j+1] = key
return sort_list
选择排序
def selection_sort(sort_list):
iter_len = len(sort_list)
if iter_len < 2:
return sort_list
for i in range(iter_len-1):
smallest = sort_list[i]
location = i
for j in range(i, iter_len):
if sort_list[j] < smallest:
smallest = sort_list[j]
location = j
if i != location:
sort_list[i], sort_list[location] = sort_list[location], sort_list[i]
return sort_list
冒泡排序
def bubblesort(numbers):
for j in range(len(numbers)-1,-1,-1):
for i in range(j):
if numbers[i]>numbers[i+1]:
numbers[i],numbers[i+1] = numbers[i+1],numbers[i]
print(i,j)
print(numbers)
快速排序
# 先从数列中取出一个数作为基准数。
# 分区过程,将比这个数大的数全放到它的右边,小于或等于它的数全放到它的左边。
# 再对左右区间重复第二步,直到各区间只有一个数。
#!/usr/bin/python
# -*- coding: utf-8 -*-
def sub_sort(array, low, high):
    key = array[low]
    while low < high:
        while low < high and array[high] >= key:
            high -= 1
        array[low] = array[high]
        while low < high and array[low] < key:
            low += 1
        array[high] = array[low]
    array[low] = key
    return low
def quick_sort(array,low,high):
if low < high:
key_index = sub_sort(array,low,high)
quick_sort(array,low,key_index-1)
quick_sort(array,key_index+1,high)
if __name__ == '__main__':
array = [8,10,9,6,4,16,5,13,26,18,2,45,34,23,1,7,3]
print array
quick_sort(array,0,len(array)-1)
print array
二分算法
#python 2f.py 123456789 4
# list('123456789') = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
#!/usr/bin/env python
import sys
def search2(a,m):
low = 0
high = len(a) - 1
while(low <= high):
mid = (low + high)/2
midval = a[mid]
if midval < m:
low = mid + 1
elif midval > m:
high = mid - 1
else:
print mid
return mid
print -1
return -1
if __name__ == "__main__":
a = [int(i) for i in list(sys.argv[1])]
m = int(sys.argv[2])
search2(a,m)
全排序
def Mideng(li):
if(type(li)!=list):
return
if(len(li)==1):
return [li]
result=[]
for i in range(0,len(li[:])):
bak=li[:]
head=bak.pop(i)
for j in Mideng(bak):
j.insert(0,head)
result.append(j)
return result
def MM(n):
if(type(n)!=int or n<2):
return
return Mideng(list(range(1,n)))
MM(6)
嵌套复杂排序
字典排序
按照键值(value)排序
# a = {'a': 'China', 'c': 'USA', 'b': 'Russia', 'd': 'Canada'}
b = sorted(a.items(), key=lambda x: x[1], reverse=True)
#[('c', 'USA'), ('b', 'Russia'), ('a', 'China'), ('d', 'Canada')]
按照键名(key)排序
#a = {'a': 'China', 'c': 'USA', 'b': 'Russia', 'd': 'Canada'}
b = sorted(a.items(), key=lambda x: x[0], reverse=True)
#[('d', 'Canada'), ('c', 'USA'), ('b', 'Russia'), ('a', 'China')]
嵌套字典, 按照字典键名(key)排序
#a = {'a': {'b': 'China'}, 'c': {'d': 'USA'}, 'b': {'c': 'Russia'}, 'd': {'a': 'Canada'}}
b = sorted(a.items(), key=lambda x: x[1], reverse=True)
#[('c', {'d': 'USA'}), ('b', {'c': 'Russia'}), ('a', {'b': 'China'}), ('d', {'a': 'Canada'})]
嵌套列表, 针对列表第一个元素排序( 其实直接写 x: x[1] 就是按照第一个值排序. )
#a = {'a': [1, 3], 'c': [3, 4], 'b': [0, 2], 'd': [2, 1]}
b = sorted(a.items(), key=lambda x: x[1][0], reverse=True)
#[('c', [3, 4]), ('d', [2, 1]), ('a', [1, 3]), ('b', [0, 2])]
嵌套列表, 按照列表其他元素排序 只需要修改列表对应的下标
# a = {'a': [1, 3], 'c': [3, 4], 'b': [0, 2], 'd': [2, 1]}
b = sorted(a.items(), key=lambda x: x[1][1], reverse=True)
# [('c', [3, 4]), ('a', [1, 3]), ('b', [0, 2]), ('d', [2, 1])]
# 总结: 此处使用lambda方法, x: x[1][1] 就可以看做是在访问字典的值, 想要按照哪个数值排序, 用相应的坐标对应即可, 但当字典过于复杂后, 应该选择用元组存储, 简化排序过程.
列表排序
1: 按照字母排序
# a = ['USA', 'China', 'Canada', 'Russia']
a.sort(reverse=True)
# ['USA', 'Russia', 'China', 'Canada']
2: 嵌套列表的排序, 按照子列表的其他值排序雷系, 修改x[0] 这里的下标即可
# a = [['USA', 'b'], ['China', 'c'], ['Canada', 'd'], ['Russia', 'a']]
a.sort(key=lambda x: x[0], reverse=True)
# [['USA', 'b'], ['Russia', 'a'], ['China', 'c'], ['Canada', 'd']]
3: 嵌套字典, 按照字典值(value) 排序
# a = [{'letter': 'b'}, {'letter': 'c'}, {'letter': 'd'}, {'letter': 'a'}]
a.sort(key=lambda x: x['letter'], reverse=True)
# [{'letter': 'd'}, {'letter': 'c'}, {'letter': 'b'}, {'letter': 'a'}]
4: 当字典值也是字典时, 这时候会优先按照键名排序, 再按照键值排序. 例子如下
# a = [{'letter': {'a': 'b'}}, {'letter': {'a': 'c'}}, {'letter': {'a': 'd'}}, {'letter': {'a': 'a'}}]
a.sort(key=lambda x: x['letter'], reverse=True)
# [{'letter': {'a': 'd'}}, {'letter': {'a': 'c'}}, {'letter': {'a': 'b'}}, {'letter': {'a': 'a'}}]
方法2:
# a = [{'letter': {'a': 'b'}}, {'letter': {'b': 'c'}}, {'letter': {'c': 'd'}}, {'letter': {'d': 'a'}}]
a.sort(key=lambda x: x['letter'], reverse=True)
#[{'letter': {'d': 'a'}}, {'letter': {'c': 'd'}}, {'letter': {'b': 'c'}}, {'letter': {'a': 'b'}}]
1000以内是3或者是5的倍数的值的和
sum([ num for num in range(1, 1000) if num % 3 == 0 or num % 5 == 0 ])
打印如下列表
1
2 1
3 2 1
4 3 2 1
5 4 3 2 1
6 5 4 3 2 1
#!/usr/local/python
i=1
while i < 7:
a = ""
n=1
while n <= i:
a = "%s %s" %(n, a)
n = n + 1
print a
i = i + 1
将字典中所有time去掉
a={'version01': {'nba': {'timenba': 'valuesasdfasdf', 'nbanbac': 'vtimefasdf', 'userasdf': 'vtimasdf'}}}
eval(str(a).replace("time",""))
阿里云oss
https://help.aliyun.com/document_detail/32027.html?spm=5176.doc32026.6.674.AXf7Lw
pip install oss2
# -*- coding: utf-8 -*-
import oss2
auth = oss2.Auth('AccessKeyId', 'AccessKeySecret')
# 注意内外网域名 不带bucket
service = oss2.Service(auth, 'oss-cn-shanghai-internal.aliyuncs.com')
print([b.name for b in oss2.BucketIterator(service)]) # 查看存在的bucket
bucket = oss2.Bucket(auth, 'http://oss-cn-shanghai-internal.aliyuncs.com', 'ec-share')
# bucket.create_bucket(oss2.models.BUCKET_ACL_PRIVATE) # 创建bucket
bucket.put_object_from_file('remote.txt','/tmp/local.txt') # 上传文件
bucket.get_object_to_file('remote.txt', 'local-backup.txt') # 下载文件
bucket.delete_object('remote.txt') # 删除文件
阿里云ecs
https://help.aliyun.com/document_detail/67117.html?spm=a2c4g.11186623.6.543.390360e41Cfpqm
pip install aliyun-python-sdk-core # 安装阿里云SDK核心库
pip install aliyun-python-sdk-ecs # 安装管理ECS的库
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkecs.request.v20140526 import DescribeInstancesRequest
from aliyunsdkecs.request.v20140526 import StopInstanceRequest
client = AcsClient(
"your-access-key-id",
"your-access-key-secret",
"your-region-id"
);
request = DescribeInstancesRequest.DescribeInstancesRequest()
request.set_PageSize(10)
try:
response = client.do_action_with_exception(request)
print response
except ServerException as e:
print e
except ClientException as e:
print e
# 使用CommonRequest的方式调用ECS的 DescribeInstanceStatus 接口
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.request import CommonRequest
client = AcsClient('your_access_key_id', 'your_access_key_secret', 'your_region_id')
request = CommonRequest()
request.set_domain('ecs.aliyuncs.com')
request.set_version('2014-05-26')
request.set_action_name('DescribeInstanceStatus')
request.add_query_param('PageNumber', '1')
request.add_query_param('PageSize', '30')
request.add_query_param('ZoneId', 'cn-shanghai-d')
response = client.do_action_with_exception(request)
# 接口列表
https://help.aliyun.com/document_detail/25506.html?spm=a2c4g.11186623.6.1084.2f672eafMskx7S
# 调用DescribeInstances查询一台或多台实例的详细信息
DescribeInstances
# 调用CreateInstance创建一台ECS实例
CreateInstance
# 调用StartInstance启动一台实例
StartInstance
# 调用StopInstance停止运行一台实例
StopInstance
# 调用DescribeInstanceStatus获取一台或多台ECS实例的状态信息
DescribeInstanceStatus
# 创建ecs, CreateInstance, stop状态
# 参数列表
# https://help.aliyun.com/document_detail/25499.html?spm=a2c4g.11186623.6.1095.4347431djUtw2v
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.request import CommonRequest
client = AcsClient('LTAIzeBZre', 'fLJOBweE8qHKxrEOnc2FIF', 'cn-shanghai')
request = CommonRequest()
request.set_domain('ecs.aliyuncs.com')
request.set_version('2014-05-26')
request.set_action_name('CreateInstance')
request.add_query_param('ImageId', 'm-uf67jei1pul0xpfsfpfv')
request.add_query_param('InstanceType', 'ecs.c5.large')
request.add_query_param('RegionId', 'cn-shanghai')
request.add_query_param('ZoneId', 'cn-shanghai-f')
request.add_query_param('SecurityGroupId', 'sg-uf6i53pjsi11yuyrwyqs')
request.add_query_param('VSwitchId', 'vsw-uf630eqh0edoe9n3ig7lz')
request.add_query_param('Period', '1')
request.add_query_param('InstanceChargeType', 'PrePaid')
request.add_query_param('AutoRenew', 'true')
request.add_query_param('AutoRenewPeriod', '1')
request.add_query_param('InstanceName', 'xuesong-test1')
request.add_query_param('HostName', 'xuesong-test1')
request.add_query_param('Password', 'azuDa9nee6aiHaey')
request.add_query_param('SystemDisk.Size', '200')
request.add_query_param('SystemDisk.Category', 'cloud_efficiency')
request.add_query_param('SystemDisk.DiskName', 'xuesong-test1')
response = client.do_action_with_exception(request)
# InstanceId # 实例ID,是访问实例的唯一标识
# RequestId # 无论调用接口成功与否,都会返回请求ID
# 启动ecs StartInstance
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.request import CommonRequest
client = AcsClient('LTAIzeBZre', 'fLJOBweE8qHKxrEOnc2FIF', 'cn-shanghai')
request = CommonRequest()
request.set_domain('ecs.aliyuncs.com')
request.set_version('2014-05-26')
request.set_action_name('StartInstance')
request.add_query_param('InstanceId', 'i-uf69e821lkybxke6yyno')
response = client.do_action_with_exception(request)
# 查询ecs信息 DescribeInstances
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.request import CommonRequest
import json
client = AcsClient('LTAIzeBZre', 'fLJOBweE8qHKxrEOnc2FIF', 'cn-shanghai')
request = CommonRequest()
request.set_domain('ecs.aliyuncs.com')
request.set_version('2014-05-26')
request.set_action_name('DescribeInstances')
request.add_query_param('InstanceIds', ['i-uf69e821lkybxke6yyno'])
response = client.do_action_with_exception(request)
jresponse = json.loads(response)
ip = jresponse['Instances']['Instance'][0]['NetworkInterfaces']['NetworkInterface'][0]['PrimaryIpAddress']
status = jresponse['Instances']['Instance'][0]['Status']
# Stopped Stopping Starting Running
# 停止ecs StopInstance
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.request import CommonRequest
client = AcsClient('LTAIzeBZre', 'fLJOBweE8qHKxrEOnc2FIF', 'cn-shanghai')
request = CommonRequest()
request.set_domain('ecs.aliyuncs.com')
request.set_version('2014-05-26')
request.set_action_name('StopInstance')
request.add_query_param('InstanceId', 'i-uf69e821lkybxke6yyno')
response = client.do_action_with_exception(request)
# 删除ecs DeleteInstance 释放一台按量付费实例或者到期的预付费(包年包月)实例
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.request import CommonRequest
client = AcsClient('LTAIzeBZre', 'fLJOBweE8qHKxrEOnc2FIF', 'cn-shanghai')
request = CommonRequest()
request.set_domain('ecs.aliyuncs.com')
request.set_version('2014-05-26')
request.set_action_name('DeleteInstance')
request.add_query_param('InstanceId', 'i-uf69e821lkybxke6yyno')
request.add_query_param('Force', 'true')
response = client.do_action_with_exception(request)
PIL图像处理
import Image
im = Image.open("j.jpg") # 打开图片
print im.format, im.size, im.mode # 打印图像格式、像素宽和高、模式
# JPEG (440, 330) RGB
im.show() # 显示最新加载图像
box = (100, 100, 200, 200)
region = im.crop(box) # 从图像中提取出某个矩形大小的图像
图片等比缩小
# -*- coding: cp936 -*-
import Image
import glob, os
#图片批处理
def timage():
for files in glob.glob('D:\\1\\*.JPG'):
filepath,filename = os.path.split(files)
filterame,exts = os.path.splitext(filename)
#输出路径
opfile = r'D:\\22\\'
#判断opfile是否存在,不存在则创建
if (os.path.isdir(opfile)==False):
os.mkdir(opfile)
im = Image.open(files)
w,h = im.size
#im_ss = im.resize((400,400))
#im_ss = im.convert('P')
im_ss = im.resize((int(w*0.12), int(h*0.12)))
im_ss.save(opfile+filterame+'.jpg')
if __name__=='__main__':
timage()
取系统返回值赋给序列
cmd = os.popen("df -Ph|awk 'NR!=1{print $5}'").readlines();
cmd = os.popen('df -h').read().split('\n')
cmd = os.popen('lo 2>&1').read()
#取磁盘使用空间
import commands
df = commands.getoutput("df -hP")
[ x.split()[4] for x in df.split("\n") ]
[ (x.split()[0],x.split()[4]) for x in df.split("\n") if x.split()[4].endswith("%") ]
切片获取星星
def getRating(rating):
return '★★★★★☆☆☆☆☆'.decode('utf8')[5-rating:10-rating]
print getRating(1)
print getRating(3)
打印表格
map = [["a","b","c"],
["d","e","f"],
["g","h","i"]]
def print_board():
for i in range(0,3):
for j in range(0,3):
print "|",map[i][j],
#if j != 2:
print '|'
井字游戏
#!/usr/bin/python
# http://www.admin10000.com/document/2506.html
def print_board():
for i in range(0,3):
for j in range(0,3):
print map[2-i][j],
if j != 2:
print "|",
print ""
def check_done():
for i in range(0,3):
if map[i][0] == map[i][1] == map[i][2] != " " \
or map[0][i] == map[1][i] == map[2][i] != " ":
print turn, "won!!!"
return True
if map[0][0] == map[1][1] == map[2][2] != " " \
or map[0][2] == map[1][1] == map[2][0] != " ":
print turn, "won!!!"
return True
if " " not in map[0] and " " not in map[1] and " " not in map[2]:
print "Draw"
return True
return False
turn = "X"
map = [[" "," "," "],
[" "," "," "],
[" "," "," "]]
done = False
while done != True:
print_board()
print turn, "'s turn"
print
moved = False
while moved != True:
print "Please select position by typing in a number between 1 and 9, see below for which number that is which position..."
print "7|8|9"
print "4|5|6"
print "1|2|3"
print
try:
pos = input("Select: ")
if pos <=9 and pos >=1:
Y = pos/3
X = pos%3
if X != 0:
X -=1
else:
X = 2
Y -=1
if map[Y][X] == " ":
map[Y][X] = turn
moved = True
done = check_done()
if done == False:
if turn == "X":
turn = "O"
else:
turn = "X"
except:
print "You need to add a numeric value"
网段划分
题目
192.168.1
192.168.3
192.168.2
172.16.3
192.16.1
192.16.2
192.16.3
10.0.4
输出结果:
192.16.1-192.16.3
192.168.1-192.168.3
172.16.3
10.0.4
答案
#!/usr/bin/python
f = file('a.txt')
c = f.readlines()
dic={}
for i in c:
a=i.strip().split('.')
if a[0]+'.'+a[1] in dic.keys():
key=dic["%s.%s" %(a[0],a[1])]
else:
key=[]
key.append(a[2])
dic[a[0]+'.'+a[1]]=sorted(key)
for x,y in dic.items():
if y[0] == y[-1]:
print '%s.%s' %(x,y[0])
else:
print '%s.%s-%s.%s' %(x,y[0],x,y[-1])
统计日志IP
# 打印出独立IP,并统计独立IP数
219.140.190.130 - - [23/May/2006:08:57:59 +0800] "GET /fg172.exe HTTP/1.1" 200 2350253
221.228.143.52 - - [23/May/2006:08:58:08 +0800] "GET /fg172.exe HTTP/1.1" 206 719996
221.228.143.52 - - [23/May/2006:08:58:08 +0800] "GET /fg172.exe HTTP/1.1" 206 713242
#!/usr/bin/python
dic={}
a=open("a").readlines()
for i in a:
ip=i.strip().split()[0]
if ip in dic.keys():
dic[ip] = dic[ip] + 1
else:
dic[ip] = 1
for x,y in dic.items():
print x," ",y
多线程下载http
# 先从文件头中或取content-length的值,即文件大小,在用header中指定Range范围来下载文件中一段字符串
# 'Range':'bytes=0-499' # 表示头500个字节
# 'Range':'bytes=-500' # 表示最后500个字节
# 'Range':'bytes=500-' # 表示500字节以后的范围
# 'Range':'bytes=0-0,-1' # 第一个和最后一个字节
# 'Range':'bytes=50-60,61-99' # 同时指定几个范围
#!/usr/bin/env python
#encoding:utf8
import urllib2
import threading
class myThread(threading.Thread):
def __init__(self, url_file, scope, url):
threading.Thread.__init__(self)
self.url_file = url_file
self.scope = scope
self.url = url
def run(self):
req_header = {'User-Agent':"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)",
'Accept':'text/html;q=0.9,*/*;q=0.8',
'Range':'bytes=%s' % self.scope,
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Connection':'close',
}
req = urllib2.Request(self.url, headers=req_header)
data = urllib2.urlopen(req, data=None).read()
start_value = int(self.scope.split('-')[0])
threadLock.acquire()
self.url_file.seek(start_value)
self.url_file.write(data)
self.url_file.flush()
threadLock.release()
if __name__ == '__main__':
url = 'http://dldir1.qq.com/qqfile/qq/QQ7.1/14522/QQ7.1.exe'
size=int(urllib2.urlopen(url).info()['content-length'])
print size
threadnum = 4
len = size / threadnum
current = 0
url_file = file(url.split('/')[-1],'wb+')
threadLock = threading.Lock()
threads = []
for tName in range(1, threadnum + 1):
if tName < threadnum:
scope = "%d-%d" %(current,len * tName - 1)
current = len * tName
elif tName == threadnum:
scope = "%d-" %(current)
print scope
thread = myThread(url_file, scope, url)
thread.start()
threads.append(thread)
for t in threads:
t.join()
url_file.flush()
url_file.close()
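urllib2 exists only on Python 2. A minimal Python 3 sketch of a single ranged request, just to show the header in isolation (the QQ download URL is reused from the script above and may no longer be live):
#!/usr/bin/env python3
import urllib.request

url = 'http://dldir1.qq.com/qqfile/qq/QQ7.1/14522/QQ7.1.exe'
req = urllib.request.Request(url, headers={'Range': 'bytes=0-499'})
with urllib.request.urlopen(req) as resp:
    chunk = resp.read()
    print(resp.status, len(chunk))   # expect 206 Partial Content and 500 bytes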
Get NIC traffic counters
#!/usr/bin/env python
net = []
f = open("/proc/net/dev")
lines = f.readlines()
f.close()
for line in lines[3:]:   # skips the two header lines plus the first interface entry (usually lo)
con = line.split()
intf = dict(
zip(
( 'interface', 'ReceiveBytes', 'ReceivePackets', 'TransmitBytes', 'TransmitPackets',),
( con[0].split(":")[0], con[0].split(":")[1], int(con[1]), int(con[8]), int(con[9]),)
)
)
net.append(intf)
print net
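The counters in /proc/net/dev are cumulative, so a rate needs two samples divided by the interval. A minimal Python 3 sketch (the 1-second interval is an arbitrary choice):
#!/usr/bin/env python3
import time

def read_bytes():
    """Return {interface: (rx_bytes, tx_bytes)} from /proc/net/dev."""
    counters = {}
    with open('/proc/net/dev') as f:
        for line in f.readlines()[2:]:        # skip the two header lines
            name, data = line.split(':', 1)
            fields = data.split()
            counters[name.strip()] = (int(fields[0]), int(fields[8]))
    return counters

before = read_bytes()
time.sleep(1)
after = read_bytes()
for iface, (rx_now, tx_now) in after.items():
    rx_old, tx_old = before.get(iface, (rx_now, tx_now))
    print('%-10s rx %10d B/s   tx %10d B/s' % (iface, rx_now - rx_old, tx_now - tx_old))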
Aliyun SDK interface
# List of Aliyun APIs
https://develop.aliyun.com/tools/sdk?#/python
# Python SDK modules
https://help.aliyun.com/document_detail/30003.html?spm=5176.doc29995.2.1.htCtSa
# API parameter reference
https://help.aliyun.com/document_detail/25500.html?spm=5176.doc25499.6.691.lWwhc0
pip install aliyun-python-sdk-core aliyun-python-sdk-ecs
dir(aliyunsdkecs.request)
v20140526
aliyunsdkecs.request.v20140526
#!/usr/bin/env python
from aliyunsdkcore import client
from aliyunsdkecs.request.v20140526 import DescribeRegionsRequest
clt = client.AcsClient('SFAW************','Nc2nZ6dQoiqck0*************',
'cn-hangzhou')
request=DescribeRegionsRequest.DescribeRegionsRequest()
print dir(request)
request.set_accept_format('json')
request.set_action_name("CreateInstance")   # note: this overrides the action already set by DescribeRegionsRequest; drop it to actually list regions
print(clt.do_action(request))
Collect system monitoring info
#!/usr/bin/env python
import inspect
import os,time,socket
class mon:
def __init__(self):
self.data = {}
def getLoadAvg(self):
with open('/proc/loadavg') as load_open:
a = load_open.read().split()[:3]
#return "%s %s %s" % (a[0],a[1],a[2])
return float(a[0])
def getMemTotal(self):
with open('/proc/meminfo') as mem_open:
a = int(mem_open.readline().split()[1])
return a / 1024
def getMemUsage(self, noBufferCache=True):
if noBufferCache:
with open('/proc/meminfo') as mem_open:
T = int(mem_open.readline().split()[1]) #Total
F = int(mem_open.readline().split()[1]) #Free
B = int(mem_open.readline().split()[1]) #Buffer
C = int(mem_open.readline().split()[1]) #Cache
return (T-F-B-C)/1024
else:
with open('/proc/meminfo') as mem_open:
a = int(mem_open.readline().split()[1]) - int(mem_open.readline().split()[1])
return a / 1024
def getMemFree(self, noBufferCache=True):
if noBufferCache:
with open('/proc/meminfo') as mem_open:
T = int(mem_open.readline().split()[1])
F = int(mem_open.readline().split()[1])
B = int(mem_open.readline().split()[1])
C = int(mem_open.readline().split()[1])
return (F+B+C)/1024
else:
with open('/proc/meminfo') as mem_open:
mem_open.readline()
a = int(mem_open.readline().split()[1])
return a / 1024
def getDiskTotal(self):
disk = os.statvfs("/")
Total = disk.f_bsize * disk.f_blocks / 1024 / 1024
return Total
def getDiskFree(self):
disk = os.statvfs("/")
Free = disk.f_bsize * disk.f_bavail / 1024 / 1024
return Free
def getTraffic(self):
traffic = {}
f = open("/proc/net/dev")
lines = f.readlines()
f.close()
for line in lines[3:]:
con = line.split()
intf = dict(
zip(
('ReceiveBytes', 'TransmitBytes',),
(con[0].split(":")[1], int(con[8]),)
)
)
traffic[con[0].split(":")[0]] = intf
return traffic
def getHost(self):
#return ['host1', 'host2', 'host3', 'host4', 'host5'][int(time.time() * 1000.0) % 5]
return socket.gethostname()
def getTime(self):
return int(time.time())
def runAllGet(self):
for fun in inspect.getmembers(self, predicate=inspect.ismethod):
if fun[0][:3] == 'get':
self.data[fun[0][3:]] = fun[1]()
return self.data
if __name__ == "__main__":
print mon().runAllGet()
Nginx 5xx DingTalk alert
import os
import sys
import datetime
import time
import requests
import json
mtime = (datetime.datetime.now()-datetime.timedelta(minutes=1)).strftime("%Y-%m-%dT%H:%M")
num = int(os.popen('''tail -n 100000 /app/nginx/logs/*_access.log | grep %s |grep 'status": 5' |wc -l ''' % mtime ).read().strip())
print num
if num > 20:
print 'baojing'
Robot = 'https://oapi.dingtalk.com/robot/send?access_token=e80aa431d237d97217827524'
headers = {'content-type': 'application/json'}
content = "lite nginx dmz01 5XX: %s" % num
dingdata = {
"msgtype": "text",
"text": {
"content": content
}
}
try:
r = requests.post(url=Robot, data=json.dumps(dingdata), headers=headers, timeout=2).json()
except Exception as err:
print 'ERROR: notice dingding api error'
print str(err)
Get the hostname and LAN IP
#!/usr/bin/env python
# -*- coding: utf8 -*-
#python network.py --host
import os
import socket
"""
copy from:
http://stackoverflow.com/questions/11735821/python-get-localhost-ip
"""
if os.name != "nt":
import fcntl
import struct
def get_interface_ip(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', ifname[:15]))[20:24])
def lan_ip():
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith("127.") and os.name != "nt":
interfaces = [
"eth0",
"eth1",
"eth2",
"wlan0",
"wlan1",
"wifi0",
"ath0",
"ath1",
"ppp0",
]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
break
except IOError:
pass
return ip
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
print socket.gethostname()
sys.exit(0)
print lan_ip()
LazyManage concurrent batch operations (switches to root via su when not logging in as root)
#!/usr/bin/python
#encoding:utf8
# LazyManage.py
# config file: serverlist.conf
import paramiko
import multiprocessing
import sys,os,time,socket,re
def Ssh_Cmd(host_ip,Cmd,user_name,user_pwd,port=22):
s = paramiko.SSHClient()
s.load_system_host_keys()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
s.connect(hostname=host_ip,port=port,username=user_name,password=user_pwd)
stdin,stdout,stderr = s.exec_command(Cmd)
Result = '%s%s' %(stdout.read(),stderr.read())
q.put('successful')
s.close()
return Result.strip()
def Ssh_Su_Cmd(host_ip,Cmd,user_name,user_pwd,root_name,root_pwd,port=22):
s = paramiko.SSHClient()
s.load_system_host_keys()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
s.connect(hostname=host_ip,port=port,username=user_name,password=user_pwd)
ssh = s.invoke_shell()
time.sleep(0.1)
ssh.send('su - %s\n' %(root_name))
buff = ''
while not buff.endswith('Password: '):
resp = ssh.recv(9999)
buff +=resp
ssh.send('%s\n' %(root_pwd))
buff = ''
while True:
resp = ssh.recv(9999)
buff +=resp
if ': incorrect password' in buff:
su_correct='passwd_error'
break
elif buff.endswith('# '):
su_correct='passwd_correct'
break
if su_correct == 'passwd_correct':
ssh.send('%s\n' %(Cmd))
buff = ''
while True:
resp = ssh.recv(9999)
if resp.endswith('# '):
buff +=re.sub('\[.*@.*\]# $','',resp)
break
buff +=resp
Result = buff.lstrip('%s' %(Cmd))
q.put('successful')
elif su_correct == 'passwd_error':
Result = "\033[31mroot密码错误\033[m"
s.close()
return Result.strip()
def Send_File(host_ip,PathList,user_name,user_pwd,Remote='/tmp',port=22):
s=paramiko.Transport((host_ip,port))
s.connect(username=user_name,password=user_pwd)
sftp=paramiko.SFTPClient.from_transport(s)
for InputPath in PathList:
LocalPath = re.sub('^\./','',InputPath.rstrip('/'))
RemotePath = '%s/%s' %( Remote , os.path.basename( LocalPath ))
try:
sftp.rmdir(RemotePath)
except:
pass
try:
sftp.remove(RemotePath)
except:
pass
if os.path.isdir(LocalPath):
sftp.mkdir(RemotePath)
for path,dirs,files in os.walk(LocalPath):
for dir in dirs:
dir_path = os.path.join(path,dir)
sftp.mkdir('%s/%s' %(RemotePath,re.sub('^%s/' %LocalPath,'',dir_path)))
for file in files:
file_path = os.path.join(path,file)
sftp.put( file_path,'%s/%s' %(RemotePath,re.sub('^%s/' %LocalPath,'',file_path)))
else:
sftp.put(LocalPath,RemotePath)
q.put('successful')
sftp.close()
s.close()
Result = '%s \033[32m传送完成\033[m' % PathList
return Result
def Ssh(host_ip,Operation,user_name,user_pwd,root_name,root_pwd,Cmd=None,PathList=None,port=22):
msg = "\033[32m-----------Result:%s----------\033[m" % host_ip
try:
if Operation == 'Ssh_Cmd':
Result = Ssh_Cmd(host_ip=host_ip,Cmd=Cmd,user_name=user_name,user_pwd=user_pwd,port=port)
elif Operation == 'Ssh_Su_Cmd':
Result = Ssh_Su_Cmd(host_ip=host_ip,Cmd=Cmd,user_name=user_name,user_pwd=user_pwd,root_name=root_name,root_pwd=root_pwd,port=port)
elif Operation == 'Ssh_Script':
Send_File(host_ip=host_ip,PathList=PathList,user_name=user_name,user_pwd=user_pwd,port=port)
Script_Head = open(PathList[0]).readline().strip()
LocalPath = re.sub('^\./','',PathList[0].rstrip('/'))
Cmd = '%s /tmp/%s' %( re.sub('^#!','',Script_Head), os.path.basename( LocalPath ))
Result = Ssh_Cmd(host_ip=host_ip,Cmd=Cmd,user_name=user_name,user_pwd=user_pwd,port=port)
elif Operation == 'Ssh_Su_Script':
Send_File(host_ip=host_ip,PathList=PathList,user_name=user_name,user_pwd=user_pwd,port=port)
Script_Head = open(PathList[0]).readline().strip()
LocalPath = re.sub('^\./','',PathList[0].rstrip('/'))
Cmd = '%s /tmp/%s' %( re.sub('^#!','',Script_Head), os.path.basename( LocalPath ))
Result = Ssh_Su_Cmd(host_ip=host_ip,Cmd=Cmd,user_name=user_name,user_pwd=user_pwd,root_name=root_name,root_pwd=root_pwd,port=port)
elif Operation == 'Send_File':
Result = Send_File(host_ip=host_ip,PathList=PathList,user_name=user_name,user_pwd=user_pwd,port=port)
else:
Result = '操作不存在'
except socket.error:
Result = '\033[31m主机或端口错误\033[m'
except paramiko.AuthenticationException:
Result = '\033[31m用户名或密码错误\033[m'
except paramiko.BadHostKeyException:
Result = '\033[31mBad host key\033[m['
except IOError:
Result = '\033[31m远程主机已存在非空目录或没有写权限\033[m'
except:
Result = '\033[31m未知错误\033[m'
r.put('%s\n%s\n' %(msg,Result))
def Concurrent(Conf,Operation,user_name,user_pwd,root_name,root_pwd,Cmd=None,PathList=None,port=22):
    # Read the config file
f=open(Conf)
list = f.readlines()
f.close()
    # Running total of hosts executed
total = 0
    # Launch one process per host
for host_info in list:
        # Skip commented-out lines in the config file
if host_info.startswith('#'):
continue
        # Extract the fields; skip this host if any required field is missing
try:
host_ip=host_info.split()[0]
#user_name=host_info.split()[1]
#user_pwd=host_info.split()[2]
except:
print('Profile error: %s' %(host_info) )
continue
try:
port=int(host_info.split()[3])
except:
port=22
total +=1
p = multiprocessing.Process(target=Ssh,args=(host_ip,Operation,user_name,user_pwd,root_name,root_pwd,Cmd,PathList,port))
p.start()
    # Print the results
for j in range(total):
print(r.get() )
if Operation == 'Ssh_Script' or Operation == 'Ssh_Su_Script':
successful = q.qsize() / 2
else:
successful = q.qsize()
print('\033[32m执行完毕[总执行:%s 成功:%s 失败:%s]\033[m' %(total,successful,total - successful) )
q.close()
r.close()
def Help():
print(''' 1.执行命令
2.执行脚本 \033[32m[位置1脚本(必须带脚本头),后可带执行脚本所需要的包\文件\文件夹路径,空格分隔]\033[m
3.发送文件 \033[32m[传送的包\文件\文件夹路径,空格分隔]\033[m
退出: 0\exit\quit
帮助: help\h\?
注意: 发送文件默认为/tmp下,如已存在同名文件会被强制覆盖,非空目录则中断操作.执行脚本先将本地脚本及包发送远程主机上,发送规则同发送文件
''')
if __name__=='__main__':
    # root account credentials
root_name = 'root'
root_pwd = 'peterli'
user_name='peterli'
user_pwd='<++(3Ie'
    # Config file
Conf='serverlist.conf'
if not os.path.isfile(Conf):
print('\033[33m配置文件 %s 不存在\033[m' %(Conf) )
sys.exit()
Help()
while True:
i = raw_input("\033[35m[请选择操作]: \033[m").strip()
q = multiprocessing.Queue()
r = multiprocessing.Queue()
if i == '1':
if user_name == root_name:
Operation = 'Ssh_Cmd'
else:
Operation = 'Ssh_Su_Cmd'
Cmd = raw_input('CMD: ').strip()
if len(Cmd) == 0:
print('\033[33m命令为空\033[m')
continue
Concurrent(Conf=Conf,Operation=Operation,user_name=user_name,user_pwd=user_pwd,root_name=root_name,root_pwd=root_pwd,Cmd=Cmd)
elif i == '2':
if user_name == root_name:
Operation = 'Ssh_Script'
else:
Operation = 'Ssh_Su_Script'
PathList = raw_input('\033[36m本地脚本路径: \033[m').strip().split()
if len(PathList) == 0:
print('\033[33m路径为空\033[m')
continue
if not os.path.isfile(PathList[0]):
print('\033[33m本地路径 %s 不存在或不是文件\033[m' %(PathList[0]) )
continue
for LocalPath in PathList[1:]:
if not os.path.exists(LocalPath):
print('\033[33m本地路径 %s 不存在\033[m' %(LocalPath) )
break
else:
Concurrent(Conf=Conf,Operation=Operation,user_name=user_name,user_pwd=user_pwd,root_name=root_name,root_pwd=root_pwd,PathList=PathList)
elif i == '3':
Operation = 'Send_File'
PathList = raw_input('\033[36m本地路径: \033[m').strip().split()
if len(PathList) == 0:
print('\033[33m路径为空\033[m')
continue
for LocalPath in PathList:
if not os.path.exists(LocalPath):
print('\033[33m本地路径 %s 不存在\033[m' %(LocalPath) )
break
else:
Concurrent(Conf=Conf,Operation=Operation,user_name=user_name,user_pwd=user_pwd,root_name=root_name,root_pwd=root_pwd,PathList=PathList)
elif i == '0' or i == 'exit' or i == 'quit':
print("\033[34m退出LazyManage脚本\033[m")
sys.exit()
elif i == 'help' or i == 'h' or i == '?':
Help()
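A sketch of what serverlist.conf might look like, inferred from the parsing in Concurrent(): only the first field (the host address) is actually used, the user/password columns are read but commented out, and an optional fourth field sets the SSH port (default 22). The addresses below are placeholders.
# serverlist.conf -- one host per line; lines starting with # are skipped
# host_ip        [user]     [password]   [port]
192.168.1.10
192.168.1.11     peterli    secret123    2222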
Epoll non-blocking persistent connections
server
#!/usr/bin/python
#-*- coding:utf-8 -*-
import socket, select, logging, errno
import os, sys, json
def cmdRunner(input):
import commands
cmd_ret = commands.getstatusoutput(input)
return json.dumps({'ret':cmd_ret[0], 'out':cmd_ret[1]}, separators=(',', ':'))
class _State:
def __init__(self):
self.state = "read"
self.have_read = 0
self.need_read = 10
self.have_write = 0
self.need_write = 0
self.data = ""
__all__ = ['nbNet']
class nbNet:
def __init__(self, host, port, logic):
self.host = host
self.port = port
self.logic = logic
self.sm = {
"read":self.aread,
"write":self.awrite,
"process":self.aprocess,
"closing":self.aclose,
}
def run(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
except socket.error, msg:
print("create socket failed")
try:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error, msg:
print("setsocketopt SO_REUSEADDR failed")
try:
self.sock.bind((self.host, self.port))
except socket.error, msg:
print("bind failed")
try:
self.sock.listen(10)
except socket.error, msg:
print(msg)
try:
self.epoll_fd = select.epoll()
            # Register the listening socket with epoll and watch for readable events
self.epoll_fd.register(self.sock.fileno(), select.EPOLLIN )
except select.error, msg:
print(msg)
self.STATE = {}
while True:
print self.STATE
            # Block on epoll until there are events, then dispatch them
epoll_list = self.epoll_fd.poll()
for fd, events in epoll_list:
if select.EPOLLHUP & events:
print 'EPOLLHUP'
self.STATE[fd][2].state = "closing"
elif select.EPOLLERR & events:
print 'EPOLLERR'
self.STATE[fd][2].state = "closing"
self.state_machine(fd)
def state_machine(self, fd):
if fd == self.sock.fileno():
print "state_machine fd %s accept" % fd
            # fd is the listening socket: accept a new connection
conn, addr = self.sock.accept()
            # Make the connection non-blocking
conn.setblocking(0)
self.STATE[conn.fileno()] = [conn, addr, _State()]
            # Register the new connection for readable events in edge-triggered (EPOLLET) mode: it will not fire again until new data arrives
self.epoll_fd.register(conn.fileno(), select.EPOLLET | select.EPOLLIN )
else:
            # Otherwise this is an existing fd: call the handler for its current state
print "state_machine fd %s %s" % (fd,self.STATE[fd][2].state)
stat = self.STATE[fd][2].state
self.sm[stat](fd)
def aread(self, fd):
try:
            # Read whatever is available on this fd
one_read = self.STATE[fd][0].recv(self.STATE[fd][2].need_read)
if len(one_read) == 0:
                # Zero bytes means the peer closed: switch to the closing state
self.STATE[fd][2].state = "closing"
self.state_machine(fd)
return
            # Append to what has already been received
self.STATE[fd][2].data += one_read
self.STATE[fd][2].have_read += len(one_read)
self.STATE[fd][2].need_read -= len(one_read)
            # First read the 10-character length header of the protocol
if self.STATE[fd][2].have_read == 10:
                # The header says how many payload bytes to expect next; record it in the state
self.STATE[fd][2].need_read += int(self.STATE[fd][2].data)
self.STATE[fd][2].data = ''
                # Re-enter the state machine
self.state_machine(fd)
elif self.STATE[fd][2].need_read == 0:
                # Everything received: switch state and run the actual handler
self.STATE[fd][2].state = 'process'
self.state_machine(fd)
except socket.error, msg:
self.STATE[fd][2].state = "closing"
print(msg)
self.state_machine(fd)
return
def aprocess(self, fd):
        # Run the handler (cmdRunner) and wrap the reply in the length-prefixed protocol
response = self.logic(self.STATE[fd][2].data)
self.STATE[fd][2].data = "%010d%s"%(len(response), response)
self.STATE[fd][2].need_write = len(self.STATE[fd][2].data)
        # Switch to the write state
self.STATE[fd][2].state = 'write'
        # Re-register the fd with epoll for writable events
self.epoll_fd.modify(fd, select.EPOLLET | select.EPOLLOUT)
self.state_machine(fd)
def awrite(self, fd):
try:
last_have_send = self.STATE[fd][2].have_write
            # Send the response back to the client
have_send = self.STATE[fd][0].send(self.STATE[fd][2].data[last_have_send:])
self.STATE[fd][2].have_write += have_send
self.STATE[fd][2].need_write -= have_send
if self.STATE[fd][2].need_write == 0 and self.STATE[fd][2].have_write != 0:
                # Done sending: reset the state and switch epoll back to readable events
self.STATE[fd][2] = _State()
self.epoll_fd.modify(fd, select.EPOLLET | select.EPOLLIN)
except socket.error, msg:
self.STATE[fd][2].state = "closing"
self.state_machine(fd)
print(msg)
return
def aclose(self, fd):
try:
print 'Error: %s:%d' %(self.STATE[fd][1][0] ,self.STATE[fd][1][1])
            # Unregister the fd from epoll
self.epoll_fd.unregister(fd)
            # Close the broken connection
self.STATE[fd][0].close()
            # Drop the fd's state entry
self.STATE.pop(fd)
except:
print 'Close the abnormal'
if __name__ == "__main__":
HOST = '0.0.0.0'
PORT = 50005
nb = nbNet(HOST, PORT, cmdRunner)
nb.run()
client
#!/usr/bin/env python
import socket, sys, os
HOST = '0.0.0.0'
PORT = 50005
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
cmd = sys.argv[1]
while True:
s.sendall("%010d%s"%(len(cmd), cmd))
print cmd
count = s.recv(10)
if not count:
print '-----------'
print count
sys.exit()
count = int(count)
buf = s.recv(count)
print buf
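A minimal Python 3 one-shot client for the same framing, to make the protocol explicit: a 10-digit zero-padded ASCII length followed by the payload. Host and port match the server defaults above; the helper name is my own.
#!/usr/bin/env python3
import socket

def call(cmd, host='127.0.0.1', port=50005):
    with socket.create_connection((host, port)) as s:
        payload = cmd.encode()
        s.sendall(b'%010d%s' % (len(payload), payload))
        length = int(s.recv(10))            # assumes the 10-byte header arrives in one recv
        data = b''
        while len(data) < length:           # then read exactly `length` bytes of body
            chunk = s.recv(length - len(data))
            if not chunk:
                break
            data += chunk
    return data.decode()

print(call('uptime'))                       # the server replies with JSON: {"ret": ..., "out": ...}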
Download location, updated from time to time:
https://github.com/liquanzhou/ops_doc
Please do not strip this notice or insert ads; resist unethical behavior.
|
test_urllib.py
|
"""Regression tests for what was in Python 2's "urllib" module"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
try:
import ssl
except ImportError:
ssl = None
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
def FancyURLopener():
with support.check_warnings(
('FancyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
return urllib.request.FancyURLopener()
def fakehttp(fakedata, mock_close=False):
class FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
if self.io_refs == 0:
io.BytesIO.close(self)
class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = None
def connect(self):
self.sock = FakeSocket(self.fakedata)
type(self).fakesock = self.sock
if mock_close:
# bpo-36918: HTTPConnection destructor calls close() which calls
# flush(). Problem: flush() calls self.fp.flush() which raises
# "ValueError: I/O operation on closed file" which is logged as an
# "Exception ignored in". Override close() to silence this error.
def close(self):
pass
FakeHTTPConnection.fakedata = fakedata
return FakeHTTPConnection
class FakeHTTPMixin(object):
def fakehttp(self, fakedata, mock_close=False):
fake_http_class = fakehttp(fakedata, mock_close=mock_close)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = fake_http_class
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
class FakeFTPMixin(object):
def fakeftp(self):
class FakeFtpWrapper(object):
def __init__(self, user, passwd, host, port, dirs, timeout=None,
persistent=True):
pass
def retrfile(self, file, type):
return io.BytesIO(), 0
def close(self):
pass
self._ftpwrapper_class = urllib.request.ftpwrapper
urllib.request.ftpwrapper = FakeFtpWrapper
def unfakeftp(self):
urllib.request.ftpwrapper = self._ftpwrapper_class
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
        # getproxies_environment uses lowercased, truncated keys (no '_proxy' suffix)
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888'))
self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234'))
def test_proxy_cgi_ignore(self):
try:
self.env.set('HTTP_PROXY', 'http://somewhere:3128')
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
self.env.set('REQUEST_METHOD', 'GET')
proxies = urllib.request.getproxies_environment()
self.assertNotIn('http', proxies)
finally:
self.env.unset('REQUEST_METHOD')
self.env.unset('HTTP_PROXY')
def test_proxy_bypass_environment_host_match(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t')
self.assertTrue(bypass('localhost'))
self.assertTrue(bypass('LocalHost')) # MixedCase
self.assertTrue(bypass('LOCALHOST')) # UPPERCASE
self.assertTrue(bypass('.localhost'))
self.assertTrue(bypass('newdomain.com:1234'))
self.assertTrue(bypass('.newdomain.com:1234'))
self.assertTrue(bypass('foo.d.o.t')) # issue 29142
self.assertTrue(bypass('d.o.t'))
self.assertTrue(bypass('anotherdomain.com:8888'))
self.assertTrue(bypass('.anotherdomain.com:8888'))
self.assertTrue(bypass('www.newdomain.com:1234'))
self.assertFalse(bypass('prelocalhost'))
self.assertFalse(bypass('newdomain.com')) # no port
self.assertFalse(bypass('newdomain.com:1235')) # wrong port
def test_proxy_bypass_environment_always_match(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY', '*')
self.assertTrue(bypass('newdomain.com'))
self.assertTrue(bypass('newdomain.com:1234'))
self.env.set('NO_PROXY', '*, anotherdomain.com')
self.assertTrue(bypass('anotherdomain.com'))
self.assertFalse(bypass('newdomain.com'))
self.assertFalse(bypass('newdomain.com:1234'))
def test_proxy_bypass_environment_newline(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234')
self.assertFalse(bypass('localhost\n'))
self.assertFalse(bypass('anotherdomain.com:8888\n'))
self.assertFalse(bypass('newdomain.com:1234\n'))
class ProxyTests_withOrderedEnv(unittest.TestCase):
def setUp(self):
# We need to test conditions, where variable order _is_ significant
self._saved_env = os.environ
# Monkey patch os.environ, start with empty fake environment
os.environ = collections.OrderedDict()
def tearDown(self):
os.environ = self._saved_env
def test_getproxies_environment_prefer_lowercase(self):
# Test lowercase preference with removal
os.environ['no_proxy'] = ''
os.environ['No_Proxy'] = 'localhost'
self.assertFalse(urllib.request.proxy_bypass_environment('localhost'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
os.environ['http_proxy'] = ''
os.environ['HTTP_PROXY'] = 'http://somewhere:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual({}, proxies)
# Test lowercase preference of proxy bypass and correct matching including ports
os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234'
os.environ['No_Proxy'] = 'xyz.com'
self.assertTrue(urllib.request.proxy_bypass_environment('localhost'))
self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678'))
self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234'))
self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
# Test lowercase preference with replacement
os.environ['http_proxy'] = 'http://somewhere:3128'
os.environ['Http_Proxy'] = 'http://somewhereelse:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
resp = urlopen("http://www.python.org")
self.assertTrue(resp.fp.will_close)
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_with_control_char_rejected(self):
for char_no in list(range(0, 0x21)) + [0x7f]:
char = chr(char_no)
schemeless_url = f"//localhost:7777/test{char}/"
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
try:
# We explicitly test urllib.request.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
# calls urllib.parse.quote() on the URL which makes all of the
# above attempts at injection within the url _path_ safe.
escaped_char_repr = repr(char).replace('\\', r'\\')
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, f"contain control.*{escaped_char_repr}"):
urllib.request.urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(
InvalidURL, f"contain control.*{escaped_char_repr}"):
urllib.request.urlopen(f"https:{schemeless_url}")
# This code path quotes the URL so there is no injection.
resp = urlopen(f"http:{schemeless_url}")
self.assertNotIn(char, resp.geturl())
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_with_newline_header_injection_rejected(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
schemeless_url = "//" + host + ":8080/test/?test=a"
try:
# We explicitly test urllib.request.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
# calls urllib.parse.quote() on the URL which makes all of the
# above attempts at injection within the url _path_ safe.
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, r"contain control.*\\r.*(found at least . .)"):
urllib.request.urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"):
urllib.request.urlopen(f"https:{schemeless_url}")
# This code path quotes the URL so there is no injection.
resp = urlopen(f"http:{schemeless_url}")
self.assertNotIn(' ', resp.geturl())
self.assertNotIn('\r', resp.geturl())
self.assertNotIn('\n', resp.geturl())
finally:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but not "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''', mock_close=True)
try:
self.assertRaises(OSError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''', mock_close=True)
try:
msg = "Redirection to url 'file:"
with self.assertRaisesRegex(urllib.error.HTTPError, msg):
urlopen("http://python.org/")
finally:
self.unfakehttp()
def test_redirect_limit_independent(self):
# Ticket #12923: make sure independent requests each use their
# own retry limit.
for i in range(FancyURLopener().maxtries):
self.fakehttp(b'''HTTP/1.1 302 Found
Location: file://guidocomputer.athome.com:/python/license
Connection: close
''', mock_close=True)
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://something")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises OSError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(OSError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
with self.assertRaises(urllib.error.URLError) as e:
urlopen('file://localhost/a/file/which/doesnot/exists.py')
self.assertTrue(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
with urlopen(tmp_fileurl) as fobj:
self.assertTrue(fobj)
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
with self.assertRaises(urllib.error.URLError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
with self.assertRaises(urllib.error.URLError) as e:
urlopen(test_ftp_url)
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_ftp_nonexisting(self):
with self.assertRaises(urllib.error.URLError) as e:
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
@patch.object(urllib.request, 'MAXFTPCACHE', 0)
def test_ftp_cache_pruning(self):
self.fakeftp()
try:
urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
urlopen('ftp://localhost')
finally:
self.unfakeftp()
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_URLopener_deprecation(self):
with support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
@unittest.skipUnless(ssl, "ssl module required")
def test_cafile_and_context(self):
context = ssl.create_default_context()
with support.check_warnings(('', DeprecationWarning)):
with self.assertRaises(ValueError):
urllib.request.urlopen(
"https://localhost", cafile="/nonexistent/path", context=context
)
class urlopen_DataTests(unittest.TestCase):
"""Test urlopen() opening a data URL."""
def setUp(self):
# text containing URL special- and unicode-characters
self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
# 2x1 pixel RGB PNG image with one black and one white pixel
self.image = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
self.text_url = (
"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
"D%26%20%C3%B6%20%C3%84%20")
self.text_url_base64 = (
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
"sJT0mIPYgxCA%3D")
# base64 encoded data URL that contains ignorable spaces,
# such as "\n", " ", "%0A", and "%20".
self.image_url = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
"vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
self.text_url_resp = urllib.request.urlopen(self.text_url)
self.text_url_base64_resp = urllib.request.urlopen(
self.text_url_base64)
self.image_url_resp = urllib.request.urlopen(self.image_url)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.text_url_resp, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_info(self):
self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
self.assertEqual(self.text_url_base64_resp.info().get_params(),
[('text/plain', ''), ('charset', 'ISO-8859-1')])
self.assertEqual(self.image_url_resp.info()['content-length'],
str(len(self.image)))
self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
[('text/plain', ''), ('charset', 'US-ASCII')])
def test_geturl(self):
self.assertEqual(self.text_url_resp.geturl(), self.text_url)
self.assertEqual(self.text_url_base64_resp.geturl(),
self.text_url_base64)
self.assertEqual(self.image_url_resp.geturl(), self.image_url)
def test_read_text(self):
self.assertEqual(self.text_url_resp.read().decode(
dict(self.text_url_resp.info().get_params())['charset']), self.text)
def test_read_text_base64(self):
self.assertEqual(self.text_url_base64_resp.read().decode(
dict(self.text_url_base64_resp.info().get_params())['charset']),
self.text)
def test_read_image(self):
self.assertEqual(self.image_url_resp.read(), self.image)
def test_missing_comma(self):
self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
def test_invalid_base64_data(self):
# missing padding character
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
        # this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
try:
filePath.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filePath is not encodable to utf8")
return "file://%s" % urllib.request.pathname2url(filePath)
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did not get an email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve(support.TEST_HTTP_URL,
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve(support.TEST_HTTP_URL)
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
r"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 3986 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-~"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append(r'<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
        # Characters in Latin-1 range, encoded with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for quoting_Tests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
The test cannot assume anything about ordering: the docs make no
guarantee, and the input may be a dictionary.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
# ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
# ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test that automatic quoting and unquoting work for pathname2url()
# and url2pathname(), respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.request.url2pathname function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(FakeHTTPMixin, unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
with support.check_warnings(
('DummyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
@support.ignore_warnings(category=DeprecationWarning)
def test_urlopener_retrieve_file(self):
with support.temp_dir() as tmpdir:
fd, tmpfile = tempfile.mkstemp(dir=tmpdir)
os.close(fd)
fileurl = "file:" + urllib.request.pathname2url(tmpfile)
filename, _ = urllib.request.URLopener().retrieve(fileurl)
# Some buildbots have TEMP folder that uses a lowercase drive letter.
self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile))
@support.ignore_warnings(category=DeprecationWarning)
def test_urlopener_retrieve_remote(self):
url = "http://www.python.org/file.txt"
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
self.addCleanup(self.unfakehttp)
filename, _ = urllib.request.URLopener().retrieve(url)
self.assertEqual(os.path.splitext(filename)[1], ".txt")
@support.ignore_warnings(category=DeprecationWarning)
def test_local_file_open(self):
# bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme
class DummyURLopener(urllib.request.URLopener):
def open_local_file(self, url):
return url
for url in ('local_file://example', 'local-file://example'):
self.assertRaises(OSError, urllib.request.urlopen, url)
self.assertRaises(OSError, urllib.request.URLopener().open, url)
self.assertRaises(OSError, urllib.request.URLopener().retrieve, url)
self.assertRaises(OSError, DummyURLopener().open, url)
self.assertRaises(OSError, DummyURLopener().retrieve, url)
# The tests below are commented out.
# It's unclear why they keep failing on Windows and SPARC; everywhere
# else they pass, but on those machines they intermittently fail in one
# test or another. They run fine on Linux.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen()
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
"""Unit tests for urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
class URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
for path in list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
for path in list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
if __name__ == '__main__':
unittest.main()
|
main_vec_delay.py
|
import argparse
import math
from collections import namedtuple
from itertools import count
import numpy as np
from eval import eval_model_q
import copy
import torch
from ddpg_vec import DDPG
from ddpg_vec_hetero import DDPGH
import random
import pickle
from replay_memory import ReplayMemory, Transition, ReplayMemory_episode
from utils import *
import os
import time
from utils import n_actions, copy_actor_policy
from ddpg_vec import hard_update
import torch.multiprocessing as mp
from multiprocessing import Queue
from multiprocessing.sharedctypes import Value
import sys
from pathlib import Path
save_path = str(Path(os.path.abspath(__file__)).parents[2]) + '/results'
save_model_path = save_path + '/ckpt_plot'
tensorboard_path = save_path + '/runs'
from torch.utils.tensorboard import SummaryWriter
parser = argparse.ArgumentParser(description='PyTorch REINFORCE example')
parser.add_argument('--scenario', required=True,
help='name of the environment to run')
parser.add_argument('--gamma', type=float, default=0.95, metavar='G',
help='discount factor for reward (default: 0.95)')
parser.add_argument('--tau', type=float, default=0.01, metavar='G',
help='soft update coefficient for the target networks (default: 0.01)')
parser.add_argument('--ou_noise', type=bool, default=True)
parser.add_argument('--param_noise', type=bool, default=False)
parser.add_argument('--train_noise', default=False, action='store_true')
parser.add_argument('--noise_scale', type=float, default=0.3, metavar='G',
help='initial noise scale (default: 0.3)')
parser.add_argument('--final_noise_scale', type=float, default=0.3, metavar='G',
help='final noise scale (default: 0.3)')
parser.add_argument('--exploration_end', type=int, default=60000, metavar='N',
help='number of episodes with noise (default: 60000)')
parser.add_argument('--seed', type=int, default=0, metavar='N',
help='random seed (default: 0)')
parser.add_argument('--batch_size', type=int, default=1024, metavar='N',
help='batch size (default: 1024)')
parser.add_argument('--num_steps', type=int, default=25, metavar='N',
help='max episode length (default: 25)')
parser.add_argument('--num_episodes', type=int, default=50000, metavar='N',
help='number of episodes (default: 50000)')
parser.add_argument('--hidden_size', type=int, default=128, metavar='N',
help='hidden layer size (default: 128)')
parser.add_argument('--updates_per_step', type=int, default=8, metavar='N',
help='model updates per simulator step (default: 8)')
parser.add_argument('--critic_updates_per_step', type=int, default=8, metavar='N',
help='critic updates per simulator step (default: 8)')
parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',
help='size of replay buffer (default: 1000000)')
parser.add_argument('--actor_lr', type=float, default=1e-2,
help='actor learning rate (default: 1e-2)')
parser.add_argument('--critic_lr', type=float, default=1e-2,
help='critic learning rate (default: 1e-2)')
parser.add_argument('--fixed_lr', default=False, action='store_true')
parser.add_argument('--num_eval_runs', type=int, default=1000, help='number of runs per evaluation (default: 1000)')
parser.add_argument("--exp_name", type=str, help="name of the experiment")
parser.add_argument("--save_dir", type=str, default=save_model_path,
help="directory in which training state and model should be saved")
parser.add_argument('--static_env', default=False, action='store_true')
parser.add_argument('--critic_type', type=str, default='mlp', help="Supports [mlp, gcn_mean, gcn_max]")
parser.add_argument('--actor_type', type=str, default='mlp', help="Supports [mlp, gcn_max]")
parser.add_argument('--critic_dec_cen', default='cen')
parser.add_argument("--env_agent_ckpt", type=str, default='ckpt_plot/simple_tag_v5_al0a10_4/agents.ckpt')
parser.add_argument('--shuffle', default=None, type=str, help='None|shuffle|sort')
parser.add_argument('--episode_per_update', type=int, default=4, metavar='N',
help='episodes between updates (default: 4)')
parser.add_argument('--episode_per_actor_update', type=int, default=4)
parser.add_argument('--episode_per_critic_update', type=int, default=4)
parser.add_argument('--steps_per_actor_update', type=int, default=100)
parser.add_argument('--steps_per_critic_update', type=int, default=100)
#parser.add_argument('--episodes_per_update', type=int, default=4)
parser.add_argument('--target_update_mode', default='soft', help='soft | hard | episodic')
parser.add_argument('--cuda', default=False, action='store_true')
parser.add_argument('--eval_freq', type=int, default=1000)
args = parser.parse_args()
if args.exp_name is None:
args.exp_name = args.scenario + '_' + args.critic_type + '_' + args.target_update_mode + '_hiddensize' \
+ str(args.hidden_size) + '_' + str(args.seed)
print("=================Arguments==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
torch.set_num_threads(1)
device = torch.device("cuda:0" if torch.cuda.is_available() and args.cuda else "cpu")
env = make_env(args.scenario, None)
n_agents = env.n
env.seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
num_adversary = 0
n_actions = n_actions(env.action_space)
obs_dims = [env.observation_space[i].shape[0] for i in range(n_agents)]
obs_dims.insert(0, 0)
if 'hetero' in args.scenario:
import multiagent.scenarios as scenarios
groups = scenarios.load(args.scenario + ".py").Scenario().group
agent = DDPGH(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, device, groups=groups)
eval_agent = DDPGH(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, 'cpu', groups=groups)
else:
agent = DDPG(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.actor_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, device)
eval_agent = DDPG(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.actor_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, 'cpu')
memory = ReplayMemory(args.replay_size)
memory_e = ReplayMemory_episode(int(args.replay_size/args.num_steps))
feat_dims = []
for i in range(n_agents):
feat_dims.append(env.observation_space[i].shape[0])
# Find main agents index
unique_dims = list(set(feat_dims))
agents0 = [i for i, feat_dim in enumerate(feat_dims) if feat_dim == unique_dims[0]]
if len(unique_dims) > 1:
agents1 = [i for i, feat_dim in enumerate(feat_dims) if feat_dim == unique_dims[1]]
main_agents = agents0 if len(agents0) >= len(agents1) else agents1
else:
main_agents = agents0
rewards = []
total_numsteps = 0
updates = 0
exp_save_dir = os.path.join(args.save_dir, args.exp_name)
os.makedirs(exp_save_dir, exist_ok=True)
best_eval_reward, best_good_eval_reward, best_adversary_eval_reward = -1000000000, -1000000000, -1000000000
value_loss, policy_loss = 0.0, 0.0
start_time = time.time()
copy_actor_policy(agent, eval_agent)
torch.save({'agents': eval_agent}, os.path.join(exp_save_dir, 'agents_best.ckpt'))
# for mp test
test_q = Queue()
done_training = Value('i', False)
p = mp.Process(target=eval_model_q, args=(test_q, done_training, args))
p.start()
writer = SummaryWriter(tensorboard_path)
for i_episode in range(args.num_episodes):
obs_n = env.reset()
episode_reward = 0
episode_step = 0
agents_rew = [[] for _ in range(n_agents)]
x_e, action_e, mask_e, x_next_e, reward_e = [], [], [], [], []
while True:
action_n = agent.select_action(torch.Tensor(obs_n).to(device), action_noise=True,
param_noise=False).squeeze().cpu().numpy()
next_obs_n, reward_n, done_n, info = env.step(action_n)
total_numsteps += 1
episode_step += 1
terminal = (episode_step >= args.num_steps)
action = torch.Tensor(action_n).view(1, -1)
mask = torch.Tensor([[not done for done in done_n]])
next_x = torch.Tensor(np.concatenate(next_obs_n, axis=0)).view(1, -1)
episode_reward += np.sum(reward_n)
# delayed reward: at the terminal step each agent receives the mean episode return, otherwise 0
if done_n[0] or terminal:
reward = [episode_reward/n_agents]*n_agents
else:
reward = [0.0]*n_agents
x_e.append(np.concatenate(obs_n, axis=0).reshape(1,-1))
action_e.append(action_n.reshape(1,-1))
mask_e.append(np.array([[not done for done in done_n]]))
x_next_e.append(np.concatenate(next_obs_n, axis=0).reshape(1,-1))
# reward_e.append(np.array([reward_n]))
reward_e.append(np.array([reward]))
for i, r in enumerate(reward_n):
agents_rew[i].append(r)
obs_n = next_obs_n
n_update_iter = 5
# if len(memory) > args.batch_size:
if len(memory_e) > args.batch_size*5:
if total_numsteps % args.steps_per_actor_update == 0:
for _ in range(args.updates_per_step):
batch = memory_e.sample(args.batch_size)
policy_loss = agent.update_actor_parameters(batch, i, args.shuffle)
updates += 1
print('episode {}, p loss {}, p_lr {}'.
format(i_episode, policy_loss, agent.actor_lr))
if total_numsteps % args.steps_per_critic_update == 0:
value_losses = []
for _ in range(args.critic_updates_per_step):
batch = memory_e.sample(args.batch_size)
value_losses.append(agent.update_critic_parameters(batch, i, args.shuffle)[0])
updates += 1
# print(value_losses)
value_loss = np.mean(value_losses)
print('episode {}, q loss {}, q_lr {}'.
format(i_episode, value_loss, agent.critic_optim.param_groups[0]['lr']))
if args.target_update_mode == 'episodic':
hard_update(agent.critic_target, agent.critic)
if done_n[0] or terminal:
print('train episode reward', episode_reward)
episode_step = 0
memory_e.push(x_e, action_e, mask_e, x_next_e, reward_e)
x_e, action_e, mask_e, x_next_e, reward_e = [], [], [], [], []
break
if not args.fixed_lr:
agent.adjust_lr(i_episode)
# writer.add_scalar('reward/train', episode_reward, i_episode)
writer.add_scalar(args.exp_name+f'_baseline_{n_agents}agent_{args.seed}', episode_reward, i_episode)
rewards.append(episode_reward)
# if (i_episode + 1) % 1000 == 0 or ((i_episode + 1) >= args.num_episodes - 50 and (i_episode + 1) % 4 == 0):
if (i_episode + 1) % args.eval_freq == 0:
tr_log = {'num_adversary': 0,
'best_good_eval_reward': best_good_eval_reward,
'best_adversary_eval_reward': best_adversary_eval_reward,
'exp_save_dir': exp_save_dir, 'total_numsteps': total_numsteps,
'value_loss': value_loss, 'policy_loss': policy_loss,
'i_episode': i_episode, 'start_time': start_time}
copy_actor_policy(agent, eval_agent)
test_q.put([eval_agent, tr_log])
env.close()
time.sleep(5)
done_training.value = True
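# Example invocation (the scenario name below is hypothetical; all flags are defined by the
# argparse section above):
#   python main_vec_delay.py --scenario simple_spread_n6 --critic_type gcn_max --num_episodes 50000 --seed 0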
|
multiprocess_iterator.py
|
import logging
import os
from queue import Empty
from typing import Iterable, Iterator, List, Optional
from torch.multiprocessing import JoinableQueue, Process, Queue, get_logger
from allennlp.common.checks import ConfigurationError
from allennlp.data.dataset import Batch
from allennlp.data.dataset_readers.multiprocess_dataset_reader import QIterable
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator, TensorDict
from allennlp.data.vocabulary import Vocabulary
logger = get_logger()
logger.setLevel(logging.INFO)
def _create_tensor_dicts_from_queue(
input_queue: Queue, output_queue: Queue, iterator: DataIterator, shuffle: bool, index: int
) -> None:
"""
Pulls instances from ``input_queue``, converts them into ``TensorDict``s
using ``iterator``, and puts them on the ``output_queue``.
"""
logger.info(f"Iterator worker: {index} PID: {os.getpid()}")
def instances() -> Iterator[Instance]:
instance = input_queue.get()
while instance is not None:
yield instance
instance = input_queue.get()
for tensor_dict in iterator(instances(), num_epochs=1, shuffle=shuffle):
output_queue.put(tensor_dict)
output_queue.put(index)
# We need to ensure we've gotten all the tensors out of this queue before
# this process ends. Otherwise we'll crash. See
# https://github.com/pytorch/pytorch/issues/7181. This appears to be an
# issue specifically with tensors, perhaps due to the refcounting involved
# in managing them in shared memory. If you're working on this code, be
# aware that I've only been able to reproduce this issue on Linux. Testing
# on a Mac alone is not sufficient.
output_queue.join()
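# Rough sketch of the JoinableQueue handshake relied on above (the consumer side lives in
# MultiprocessIterator further down in this file):
#   worker:    output_queue.put(tensor_dict); ...; output_queue.put(index); output_queue.join()
#   consumer:  item = output_queue.get(); output_queue.task_done()   # releases join() once all items are acknowledged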
def _create_tensor_dicts_from_qiterable(
qiterable: QIterable, output_queue: Queue, iterator: DataIterator, shuffle: bool, index: int
) -> None:
"""
Pulls instances from ``qiterable.output_queue``, converts them into
``TensorDict``s using ``iterator``, and puts them on the ``output_queue``.
"""
logger.info(f"Iterator worker: {index} PID: {os.getpid()}")
def instances() -> Iterator[Instance]:
while qiterable.num_active_workers.value > 0 or qiterable.num_inflight_items.value > 0:
while True:
try:
yield qiterable.output_queue.get(block=False, timeout=1.0)
with qiterable.num_inflight_items.get_lock():
qiterable.num_inflight_items.value -= 1
except Empty:
break
for tensor_dict in iterator(instances(), num_epochs=1, shuffle=shuffle):
output_queue.put(tensor_dict)
output_queue.put(index)
# See the note above in _create_tensor_dicts_from_queue.
output_queue.join()
def _queuer(
instances: Iterable[Instance], input_queue: Queue, num_workers: int, num_epochs: Optional[int]
) -> None:
"""
Reads Instances from the iterable and puts them in the input_queue.
"""
logger.info(f"Iterator queuer. PID: {os.getpid()}")
epoch = 0
while num_epochs is None or epoch < num_epochs:
epoch += 1
for instance in instances:
input_queue.put(instance)
# Now put a None for each worker, since each needs to receive one
# to know that it's done.
for _ in range(num_workers):
input_queue.put(None)
@DataIterator.register("multiprocess")
class MultiprocessIterator(DataIterator):
"""
Wraps another ```DataIterator``` and uses it to generate tensor dicts
using multiple processes.
# Parameters
base_iterator : ``DataIterator``
The ``DataIterator`` for generating tensor dicts. It will be shared among
processes, so it should not be stateful in any way.
num_workers : ``int``, optional (default = 1)
The number of processes used for generating tensor dicts.
output_queue_size : ``int``, optional (default = 1000)
The size of the output queue on which tensor dicts are placed to be consumed.
You might need to increase this if you're generating tensor dicts too quickly.
"""
def __init__(
self, base_iterator: DataIterator, num_workers: int = 1, output_queue_size: int = 1000
) -> None:
super().__init__()
self.num_workers = num_workers
self.batch_size = base_iterator._batch_size
self.output_queue_size = output_queue_size
# These two options make the iterator stateful, which means it can't be shared
# across multiple processes.
if base_iterator._cache_instances:
raise ConfigurationError("cannot use Multiprocess iterator with cache_instances")
if base_iterator._instances_per_epoch:
raise ConfigurationError("cannot use instances_per_epoch with Multiprocess iterator")
self.iterator = base_iterator
self.processes: List[Process] = []
self.queuer: Optional[Process] = None
def _create_batches(self, instances: Iterable[Instance], shuffle: bool) -> Iterable[Batch]:
raise RuntimeError("MultiprocessIterator doesn't use create_batches")
def index_with(self, vocab: Vocabulary):
self.iterator.index_with(vocab)
def _call_with_instances(
self, instances: Iterable[Instance], num_epochs: int, shuffle: bool
) -> Iterator[TensorDict]:
# JoinableQueue needed here as sharing tensors across processes
# requires that the creating process not exit prematurely.
output_queue = JoinableQueue(self.output_queue_size)
input_queue = Queue(self.output_queue_size * self.batch_size)
# Start process that populates the queue.
self.queuer = Process(
target=_queuer, args=(instances, input_queue, self.num_workers, num_epochs)
)
self.queuer.start()
# Start the tensor-dict workers.
for i in range(self.num_workers):
args = (input_queue, output_queue, self.iterator, shuffle, i)
process = Process(target=_create_tensor_dicts_from_queue, args=args)
process.start()
self.processes.append(process)
num_finished = 0
while num_finished < self.num_workers:
item = output_queue.get()
output_queue.task_done()
if isinstance(item, int):
num_finished += 1
logger.info(f"worker {item} finished ({num_finished} / {self.num_workers})")
else:
yield item
for process in self.processes:
process.join()
self.processes.clear()
if self.queuer is not None:
self.queuer.join()
self.queuer = None
def _call_with_qiterable(
self, qiterable: QIterable, num_epochs: int, shuffle: bool
) -> Iterator[TensorDict]:
# JoinableQueue needed here as sharing tensors across processes
# requires that the creating process not exit prematurely.
output_queue = JoinableQueue(self.output_queue_size)
for _ in range(num_epochs):
qiterable.start()
# Start the tensor-dict workers.
for i in range(self.num_workers):
args = (qiterable, output_queue, self.iterator, shuffle, i)
process = Process(target=_create_tensor_dicts_from_qiterable, args=args)
process.start()
self.processes.append(process)
num_finished = 0
while num_finished < self.num_workers:
item = output_queue.get()
output_queue.task_done()
if isinstance(item, int):
num_finished += 1
logger.info(f"worker {item} finished ({num_finished} / {self.num_workers})")
else:
yield item
for process in self.processes:
process.join()
self.processes.clear()
qiterable.join()
def __call__(
self, instances: Iterable[Instance], num_epochs: int = None, shuffle: bool = True
) -> Iterator[TensorDict]:
# If you run it forever, the multiprocesses won't shut down correctly.
# TODO(joelgrus) find a solution for this
if num_epochs is None:
raise ConfigurationError(
"Multiprocess Iterator must be run for a fixed number of epochs"
)
if isinstance(instances, QIterable):
return self._call_with_qiterable(instances, num_epochs, shuffle)
else:
return self._call_with_instances(instances, num_epochs, shuffle)
def __del__(self) -> None:
"""
Terminate processes if the user hasn't joined implicitly by consuming
all the tensors. This is necessary as leaving stray processes running
can corrupt shared state. In brief, we've observed shared memory
counters being reused (when the memory was free from the perspective of
the parent process) while the stray workers still held a reference to
them.
For a discussion of using destructors in Python in this manner, see
https://eli.thegreenplace.net/2009/06/12/safely-using-destructors-in-python/.
"""
for process in self.processes:
process.terminate()
if self.queuer is not None:
self.queuer.terminate()
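# Minimal usage sketch (assumes an AllenNLP 0.x-style BasicIterator plus pre-built `vocab` and
# `instances`; those names are illustrative, not defined in this file):
#   from allennlp.data.iterators import BasicIterator
#   iterator = MultiprocessIterator(BasicIterator(batch_size=32), num_workers=4)
#   iterator.index_with(vocab)
#   for tensor_dict in iterator(instances, num_epochs=1):
#       ...  # feed tensor_dict to the model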
|
io.py
|
import pickle
import codecs
import contextlib
from contextlib import contextmanager
import gzip
import json
import os
import random
import shutil
import subprocess
import sys
import time
from queue import Queue, Empty
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from collections.abc import Mapping
from os.path import join
from threading import Thread
import jsonpickle
import numpy as np
from fabric.api import local, settings
from fabric.context_managers import hide
from gtd.utils import truncated
class MultiStream(object):
def __init__(self, *streams):
self.streams = streams
def write(self, msg):
for s in self.streams:
s.write(msg)
s.flush()
def flush(self):
for s in self.streams:
s.flush()
class redirect_stream(object):
"""Inside this context manager, inputs to a target stream are redirected to a replacement stream instead."""
def __init__(self, replacement):
"""Redirect.
Args:
replacement: replace the target stream with this stream.
"""
self._replacement = replacement
@property
def target_stream(self):
"""Get the target stream."""
raise NotImplementedError
@target_stream.setter
def target_stream(self, s):
"""Set the target stream."""
raise NotImplementedError
def __enter__(self):
self._original = self.target_stream # save the original stream
self.target_stream = self._replacement
def __exit__(self, exc_type, exc_value, traceback):
self._replacement.flush()
self.target_stream = self._original # put the original stream back
class redirect_stdout(redirect_stream):
@property
def target_stream(self):
return sys.stdout
@target_stream.setter
def target_stream(self, s):
sys.stdout = s
class redirect_stderr(redirect_stream):
@property
def target_stream(self):
return sys.stderr
@target_stream.setter
def target_stream(self, s):
sys.stderr = s
class save_stdout(object):
def __init__(self, save_dir):
makedirs(save_dir)
save_file = lambda filename: open(join(save_dir, filename), 'a')
self._f_out = save_file('stdout.txt')
self._f_err = save_file('stderr.txt')
self._redirects = [redirect_stdout(MultiStream(self._f_out, sys.stdout)),
redirect_stderr(MultiStream(self._f_err, sys.stderr))]
def __enter__(self):
for r in self._redirects:
r.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
for r in self._redirects:
r.__exit__(exc_type, exc_val, exc_tb)
self._f_out.close()
self._f_err.close()
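# Usage sketch for save_stdout (directory name is hypothetical): tee stdout/stderr to files
# while still echoing to the console.
#   with save_stdout('/tmp/experiment_logs'):
#       print('goes to the console and is appended to /tmp/experiment_logs/stdout.txt')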
def utfopen(path, mode):
"""Open a file with UTF-8 encoding."""
return codecs.open(path, mode, encoding='utf-8')
def save(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f)
def load(path):
with open(path, 'rb') as f:
return pickle.load(f)
def work_in_sandbox(directory):
"""Create a sandbox directory, and set cwd to sandbox.
Deletes any existing sandbox directory!
Args:
directory: directory in which to put sandbox directory
"""
os.chdir(directory)
p = 'sandbox'
if os.path.exists(p): # remove if already exists
shutil.rmtree(p)
os.makedirs(p)
os.chdir(p)
print(os.getcwd())
def makedirs(directory):
"""If directory does not exist, make it.
Args:
directory (str): a path to a directory. Cannot be the empty path.
"""
if directory != '' and not os.path.exists(directory):
os.makedirs(directory)
def reset_state():
# Reset all random seeds, as well as TensorFlow default graph
random.seed(0)
np.random.seed(0)
import tensorflow as tf
from tensorflow.python.framework import ops
tf.set_random_seed(0)
ops.reset_default_graph()
class EmptyFile(object):
"""Delivers a never-ending stream of empty strings."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __iter__(self):
return self
def __next__(self):
return ''
def read_files(*file_paths):
files = []
for i, p in enumerate(file_paths):
if p:
files.append(open(p, mode="r"))
print('Opened:', p)
else:
files.append(EmptyFile())
print('WARNING: no path provided for file {} in list.'.format(i))
# contextlib.nested was removed in Python 3; ExitStack provides the same behavior.
with contextlib.ExitStack() as stack:
entered_files = [stack.enter_context(f) for f in files]
for lines in zip(*entered_files):
yield lines
class MultiFileWriter(object):
def __init__(self, *file_paths):
self.file_paths = file_paths
def __enter__(self):
self.files = [open(fp, 'w') for fp in self.file_paths]
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for file in self.files:
file.__exit__(exc_type, exc_val, exc_tb)
def write(self, lines):
assert len(lines) == len(self.files)
for f, line in zip(self.files, lines):
f.write(line)
def open_or_create(path, *args, **kwargs):
"""Open a file or create it, if it does not exist.
Args:
path (str): path to file
gz (bool): whether to use GZIP or not. Defaults to False.
Returns:
file object
"""
gz = kwargs.pop('gz', False)
open_file = gzip.open if gz else open
if not os.path.isfile(path):
with open_file(path, 'w'):
pass # create file
return open_file(path, *args, **kwargs)
class Process(object):
def __init__(self, cmd, cwd=None):
# universal_newlines=True so reads return str; the read loop below compares against '' and '\n'
self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=cwd,
universal_newlines=True)
def read(self, timeout=float('inf')):
def enqueue_output(out, queue):
for c in iter(lambda: out.read(1), ''):
queue.put(c)
q = Queue()
t = Thread(target=enqueue_output, args=(self._proc.stdout, q))
t.daemon = True # thread dies with the program
t.start()
last_yield_time = time.time()
while True:
try:
yield q.get(timeout=0.001)
last_yield_time = time.time()
except Empty:
# if 1 millisecond passes without new item on queue...
if not self.alive:
# break if process has died
break
if time.time() - last_yield_time > timeout:
# break if time is up
break
def read_lines(self, timeout=float('inf')):
chars = []
for c in self.read(timeout):
chars.append(c)
if c == '\n':
yield ''.join(chars[:-1])
chars = []
@property
def pid(self):
return self._proc.pid
@property
def alive(self):
code = self._proc.poll()
return code is None
def terminate(self):
return self._proc.terminate()
def wait(self):
return self._proc.wait()
def shell(cmd, cwd=None, verbose=False, debug=False):
"""Execute a command just like you would at the command line.
Attempts to print output from the command with as little buffering as possible.
http://stackoverflow.com/questions/18421757/live-output-from-subprocess-command
Args:
cmd (str): command to execute, just as you would enter at the command line
cwd (str): current working directory to execute the command
verbose (bool): whether to print out the results of the command
debug (bool): if True, command is not actually executed. Typically used with verbose=True.
Returns:
all output from the command
"""
if verbose:
print(cmd)
if debug:
return
output = []
process = Process(cmd, cwd)
for c in process.read():
output.append(c)
if verbose:
sys.stdout.write(c)
sys.stdout.flush()
status = process.wait()
if status != 0:
raise RuntimeError('Error, exit code: {}'.format(status))
# TODO: make sure we get all output
return ''.join(output)
def local_bash(command, capture=False):
"""Just like fabric.api.local, but with shell='/bin/bash'."""
return local(command, capture, shell='/bin/bash')
class JSONPicklable(object, metaclass=ABCMeta):
"""Uses jsonpickle to convert any picklable object to and from JSON."""
@abstractmethod
def __getstate__(self):
"""Return a variable with enough information to reconstruct the object."""
pass
@abstractmethod
def __setstate__(self, state):
"""Use the variable from __getstate__ to restore the object.
Note that pickle created this object without calling __init__.
So, a common strategy is to manually call self.__init__(...) inside this function, using the information
provided by `state`.
"""
pass
def to_json_str(self):
return jsonpickle.encode(self)
@classmethod
def from_json_str(cls, s):
return jsonpickle.decode(s)
def to_json(self):
"""Use jsonpickle to convert this object to JSON."""
s = self.to_json_str()
d = json.loads(s) # convert str to dict
return d
@classmethod
def from_json(cls, d):
"""Use jsonpickle to convert JSON into an object."""
s = json.dumps(d)
obj = cls.from_json_str(s)
return obj
def to_file(self, path):
with open(path, 'w') as f:
json.dump(self.to_json(), f)
@classmethod
def from_file(cls, path):
with open(path, 'r') as f:
d = json.load(f)
return JSONPicklable.from_json(d)
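# Minimal sketch of a JSONPicklable subclass (hypothetical class, for illustration only):
#   class Point(JSONPicklable):
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#       def __getstate__(self):
#           return (self.x, self.y)
#       def __setstate__(self, state):
#           self.__init__(*state)
#   p = Point.from_json(Point(1, 2).to_json())  # round-trips through plain JSON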
class InitPicklable(object):
def __new__(cls, *args, **kwargs):
obj = super(InitPicklable, cls).__new__(cls)
obj.__initargs = args, kwargs
return obj
def __getstate__(self):
return self.__initargs
def __setstate__(self, state):
args, kwargs = state
self.__init__(*args, **kwargs)
def sub_dirs(root_dir):
"""Return a list of all sub-directory paths.
Example:
>> root_dir = '/Users/Kelvin/data'
>> sub_dirs(root_dir)
['/Users/Kelvin/data/a', '/Users/Kelvin/data/b']
"""
dir_paths = []
for path in os.listdir(root_dir):
full_path = join(root_dir, path)
if os.path.isdir(full_path):
dir_paths.append(full_path)
return dir_paths
class IntegerDirectories(Mapping):
"""Keep track of directories with names of the form "{integer}_{something}" or just "{integer}"."""
def __init__(self, root_dir):
self.root_dir = root_dir
makedirs(root_dir)
@property
def _ints_to_paths(self):
ints_to_paths = {}
for p in sub_dirs(self.root_dir):
name = os.path.basename(p)
try:
i = int(name.split('_')[0])
if i in ints_to_paths:
raise IOError("Multiple directories with the same integer prefix: {} and {}".format(
ints_to_paths[i], p))
ints_to_paths[i] = p
except ValueError:
# the first element was not an integer
pass
# put into an ordered dict
ordered = OrderedDict()
for i in sorted(ints_to_paths):
ordered[i] = ints_to_paths[i]
return ordered
def __len__(self):
return len(self._ints_to_paths)
@property
def largest_int(self):
"""Largest int among the integer directories."""
if len(self._ints_to_paths) == 0:
return None
return max(self._ints_to_paths)
def new_dir(self, name=None):
"""Create a new directory and return its path."""
if self.largest_int is None:
idx = 0
else:
idx = self.largest_int + 1
path = join(self.root_dir, str(idx))
if name:
path = '{}_{}'.format(path, name) # add name as suffix
makedirs(path)
return path
def __getitem__(self, i):
"""Get the path to directory i.
Raises:
KeyError, if directory does not exist.
"""
if i not in self._ints_to_paths:
raise KeyError("Directory #{} not found".format(i))
return self._ints_to_paths[i]
def __iter__(self):
return iter(self._ints_to_paths)
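# Usage sketch (root directory is hypothetical):
#   runs = IntegerDirectories('/tmp/runs')
#   first = runs.new_dir('baseline')   # creates /tmp/runs/0_baseline
#   second = runs.new_dir()            # creates /tmp/runs/1
#   runs[0]                            # -> '/tmp/runs/0_baseline'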
def rsync(src_path, dest_path, src_host=None, dest_host=None, delete=False):
"""Sync a file/directory from one machine to another machine.
Args:
src_path (str): a file or directory on the source machine.
dest_path (str): the corresponding file or directory on the target machine.
src_host (str): the address of the source machine. Default is local machine.
dest_host (str): the address of the target machine. Default is local machine.
delete (bool): default is False. If True, deletes any extraneous files at the destination not
present at the source!
Options used:
-r: recurse into directories
-l: copy symlinks as symlinks
-v: verbose
-z: compress files during transfer
-t: preserve times (needed for rsync to recognize that files haven't changed since last update!)
--delete: delete any extraneous files at the destination
--progress: show progress
"""
if os.path.isdir(src_path):
if not src_path.endswith('/'):
src_path += '/' # add missing trailing slash
def format_address(host, path):
if host is None:
return path
else:
return '{}:{}'.format(host, path)
cmds = ["rsync", "-rlvzt", "--progress"]
if delete:
cmds.append('--delete')
cmds.append(format_address(src_host, src_path))
cmds.append(format_address(dest_host, dest_path))
cmd = ' '.join(cmds)
local(cmd)
def num_lines(file_path):
"""Count the number of lines in a file.
Uses the `wc` command line tool.
Args:
file_path (str)
Returns:
int
"""
return int(local('wc -l {}'.format(file_path), capture=True).split()[0])
class Tmux(object):
def __init__(self, name, cwd=None):
"""Create a tmux session.
Args:
name (str): name of the new session
cwd (str): initial directory of the session
Options used:
-d: do not attach to the new session
-s: specify a name for the session
"""
self.name = name
with settings(hide('warnings'), warn_only=True):
result = local("tmux new -d -s {}".format(name)) # start tmux session
if result.failed:
raise TmuxSessionExists()
if cwd is None:
cwd = os.getcwd()
# move to current directory
self.run("cd {}".format(cwd))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def run(self, command):
"""Run command in tmux session.
Assume that the session has only one window.
Args:
command (str)
"""
local('tmux send -t {} "{}" Enter'.format(self.name, command))
def close(self):
local("tmux kill-session -t {}".format(self.name))
class TmuxSessionExists(Exception):
pass
def tunnel(local_port, host, target, target_port, tmux_name, autossh_port=20000):
"""Make a port on a target machine appear as if it is a port on our local machine.
Uses autossh to keep the tunnel open even with interruptions.
Runs autossh in a new tmux session, so that it can be monitored.
Args:
local_port (int): a port on this machine, e.g. 18888
host (str): the machine that will be used to create the SSH tunnel, e.g. `kgu@jamie.stanford.edu` or just `jamie`
if we have that alias configured in ~/.ssh/config.
target (str): the address of the target machine, e.g. `kgu@john11.stanford.edu` or just `john11`. The address
should be RELATIVE to the host machine.
target_port (int): port on the target machine, e.g. 8888
tmux_name (str): name of the tmux session that will be running the autossh command.
autossh_port (int): local port used by autossh to monitor the connection. Cannot be used by more than one
autossh process at a time!
"""
command = "autossh -M {} -N -n -T -L {}:{}:{} {}".format(autossh_port, local_port, target, target_port, host)
tmux = Tmux(tmux_name)
tmux.run(command)
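# Example call (host names and ports reuse the hypothetical values from the docstring): make a
# notebook on john11:8888 reachable as localhost:18888, tunnelled through jamie.
#   tunnel(18888, 'jamie', 'john11', 8888, 'nb-tunnel')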
class Workspace(object):
"""Manage paths underneath a top-level root directory.
Paths are registered with this Workspace. An IOError is thrown if the path has already been registered before.
"""
def __init__(self, root):
"""Create a Workspace.
Args:
root (str): absolute path of the top-level directory.
"""
self._root = root
makedirs(root)
self._paths = set()
@property
def root(self):
return self._root
def _add(self, name, relative_path):
"""Register a path.
Args:
name (str): short name to reference the path
relative_path (str): a relative path, relative to the workspace root.
Returns:
self
"""
full_path = join(self._root, relative_path)
if hasattr(self, name):
raise IOError('Name already registered: {}'.format(name))
if full_path in self._paths:
raise IOError('Path already registered: {}'.format(relative_path))
self._paths.add(full_path)
setattr(self, name, full_path)
return self
def add_dir(self, name, relative_path):
self._add(name, relative_path)
makedirs(getattr(self, name))
def add_file(self, name, relative_path):
self._add(name, relative_path)
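# Usage sketch (root and names are hypothetical):
#   ws = Workspace('/tmp/experiment')
#   ws.add_dir('checkpoints', 'ckpt')   # creates /tmp/experiment/ckpt, available as ws.checkpoints
#   ws.add_file('log', 'train.log')     # registers /tmp/experiment/train.log as ws.log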
def split_path(path):
"""Break a file path into its components.
Args:
path (str): e.g. '/Users/Joe/Documents/file.txt'
Returns:
elements (list[str]): e.g. ['Users', 'Joe', 'Documents', 'file.txt']
"""
elements = []
dir_name = path
while True:
dir_name, leaf = os.path.split(dir_name)
if leaf:
elements.append(leaf)
else:
break
return list(reversed(elements))
@contextmanager
def lines_in_file(path, limit=float('inf'), desc=None, compute_total=True):
from gtd.chrono import verboserate
if compute_total:
total = min(num_lines(path), limit) # compute total lines in file
else:
total = None
with codecs.open(path, 'r', encoding='utf-8') as lines:
if desc:
lines = verboserate(lines, desc=desc, total=total)
if limit:
lines = truncated(lines, limit)
yield lines
|
master.py
|
import socket
import multiprocessing
import errno
import sys
import logging
from queue import Empty
from time import sleep
from datetime import datetime
from hurricane.utils import *
from hurricane.messages import MessageTypes
from hurricane.messages import TaskManagementMessage
from hurricane.messages import HeartbeatMessage
from hurricane.messages import NewNodeMessage
from hurricane.messages import NodeInitializeMessage
class MasterNode:
def __init__(self, **kwargs):
self.initialize_port = kwargs.get('initialize_port', 12222)
self.debug = kwargs.get('debug', False)
self.max_disconnect_errors = kwargs.get('max_disconnect_errors', 3)
self.max_connections = 20
self.connect_timeout = 10
self.task_port = self.initialize_port + 1
self.task_completion_port = self.initialize_port + 2
self.scanner_input, self.scanner_output = multiprocessing.Pipe()
self.has_connection_input, self.has_connection_output = multiprocessing.Pipe()
self.completed_tasks_input, self.completed_tasks_output = multiprocessing.Pipe()
self.has_connection_tf = False
self.completed_tasks_queue = multiprocessing.Queue()
self.completed_tasks = []
self.current_tasks_queue = multiprocessing.Queue()
self.current_tasks = []
self.send_tasks_queue = multiprocessing.Queue()
self.exit_signal = multiprocessing.Event()
logging.basicConfig(format="%(asctime)s %(name)s [%(levelname)s] %(message)s", level=kwargs.get("level", logging.INFO))
def initialize(self):
"""
This method runs in the background and attempts to identify slaves to use.
"""
logging.info("Initializing the master node")
logging.info("Starting scanning process")
self.scanning_process = multiprocessing.Process(target=self.identify_slaves)
self.scanning_process.start()
logging.info("Starting task distribution process")
self.node_management_process = multiprocessing.Process(target=self.node_manager)
self.node_management_process.daemon = True
self.node_management_process.start()
def stop(self):
"""
Stop the server and kill all child processes
"""
self.exit_signal.set()
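# Rough lifecycle sketch (the task object is hypothetical; task construction lives outside this file):
#   master = MasterNode(initialize_port=12222)
#   master.initialize()                  # start the scanning and task-distribution processes
#   master.wait_for_connection()         # block until a slave node registers
#   master.send_tasks_queue.put(task)    # queue a task for distribution
#   result = master.wait_for_any_task_completion()
#   master.stop()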
def node_manager(self):
"""
This process manages task distribution within the node network.
"""
nodes = {}
while not self.exit_signal.is_set():
nodes = self.manage_node_status(nodes)
while self.completed_tasks_input.poll():
completed_task = self.completed_tasks_input.recv()
for node in nodes:
if nodes[node]["task"]:
if nodes[node]["task"].get_task_id() == completed_task.get_task_id():
nodes[node]["task"] = None
current_tasks = []
for node in nodes:
if nodes[node]["task"]:
current_tasks.append(nodes[node]["task"].get_task_id())
self.current_tasks_queue.put(current_tasks)
for node in nodes:
if not nodes[node]["task"]:
task = None
try:
task = self.send_tasks_queue.get(block=False)
except Empty:
pass
if task:
did_error_occur = False
try:
task_socket = create_active_socket(self.get_host(node), int(self.get_port(node)))
logging.info("Sending task " + str(task.get_task_id()) + " to " + node)
task_socket.send(encode_data(task))
task_socket.close()
nodes[node]["num_disconnects"] = 0
except socket.error as err:
did_error_occur = True
if err.errno == errno.ECONNREFUSED or err.args[0] == "timed out":
logging.error("Connection refused when attempting to send a task to " + node + ", try number " + str(nodes[node]["num_disconnects"] + 1))
nodes[node]["num_disconnects"] += 1
elif err.errno == errno.EPIPE:
logging.error("Client connection from " + node + " disconnected early")
else:
logging.error("Unknown error \"" + err.args[0] + "\" thrown when attempting to send a task to " + node)
if not did_error_occur:
nodes[node]["task"] = task
try:
if "connect_time" in nodes[node]:
if (datetime.now() - nodes[node]["connect_time"]).total_seconds() > self.connect_timeout:
task_socket = create_active_socket(self.get_host(node), int(self.get_port(node)))
task_socket.send(encode_data(HeartbeatMessage()))
task_socket.close()
nodes[node]["num_disconnects"] = 0
nodes[node]["connect_time"] = datetime.now()
else:
task_socket = create_active_socket(self.get_host(node), int(self.get_port(node)))
task_socket.send(encode_data(HeartbeatMessage()))
task_socket.close()
nodes[node]["num_disconnects"] = 0
nodes[node]["connect_time"] = datetime.now()
except socket.error as err:
nodes[node]["connect_time"] = datetime.now()
if err.errno == errno.ECONNREFUSED:
logging.error("Connection refused when attempting to send a task to " + node + ", try number " + str(nodes[node]["num_disconnects"] + 1))
nodes[node]["num_disconnects"] += 1
sleep(0.5)
sleep(0.1)
def identify_slaves(self):
"""
Identify slave nodes.
"""
initialize_socket = create_listen_socket_timer(self.initialize_port, self.max_connections)
while not self.exit_signal.is_set():
try:
connection, addr = initialize_socket.accept()
self.update_available_ports()
self.scanner_output.send(NewNodeMessage(addr, self.task_port, self.task_completion_port))
task_completion_monitoring_process = multiprocessing.Process(target=self.node_communication_receiver, args=(self.task_completion_port,))
task_completion_monitoring_process.daemon = True
task_completion_monitoring_process.start()
sleep(0.01)
connection.send(encode_data(TaskManagementMessage(task_port=self.task_port, task_completion_port=self.task_completion_port)))
connection.close()
except socket.error as err:
if err.args[0] == "timed out":
pass
def node_communication_receiver(self, port):
"""
Capture the data received whenever a node sends data to the master node.
"""
data_socket = create_listen_socket(port, self.max_connections)
while not self.exit_signal.is_set():
connection, addr = data_socket.accept()
data = read_data(connection)
connection.close()
if data.get_message() == MessageTypes.TASK:
completed_task = data.get_task()
logging.info("Received task completion for task " + str(completed_task.get_task_id()))
self.completed_tasks_output.send(completed_task)
self.completed_tasks_queue.put(completed_task)
elif data.get_message() == MessageTypes.INITIALIZE_NODE:
self.scanner_output.send(NodeInitializeMessage(addr, data.get_cpu_count()))
def is_task_completed(self, task_id):
"""
Returns "True, generated_data" if the task has been completed,
"False, None" if it has not.
"""
self.update_completed_tasks()
for task_idx in range(len(self.completed_tasks)):
task = self.completed_tasks[task_idx]
if task_id == task.get_task_id():
updated_completed_tasks = self.completed_tasks[:task_idx]
updated_completed_tasks.extend(self.completed_tasks[task_idx + 1:])
self.completed_tasks = updated_completed_tasks
return True, task
return False, None
def update_completed_tasks(self):
"""
Update the completed tasks list from the completed tasks queue
"""
while True:
try:
self.completed_tasks.append(self.completed_tasks_queue.get(block=False))
except Empty:
break
def update_current_tasks(self):
"""
Update the current tasks list from the current tasks queue
"""
while True:
try:
self.current_tasks = self.current_tasks_queue.get(block=False)
except Empty:
break
def wait_for_any_task_completion(self, timeout=-1):
"""
Wait for any task to be completed
"""
if timeout > 0:
time = 0
while time < timeout and not self.exit_signal.is_set():
if self.completed_tasks == []:
self.update_completed_tasks()
sleep(0.1)
time += 0.1
else:
return self.completed_tasks.pop(0)
return None
while not self.exit_signal.is_set():
if self.completed_tasks == []:
self.update_completed_tasks()
sleep(0.1)
else:
return self.completed_tasks.pop(0)
def wait_for_task_completion(self, task_id, timeout=-1):
"""
Wait for the task with task_id to be completed.
"""
if self.has_connection() == False:
logging.error("No nodes are connected...please connect a node then send it a task")
return None
logging.info("Waiting for task " + str(task_id) + " to be completed")
if timeout > 0:
time = 0
while time < timeout and not self.exit_signal.is_set():
completed, data = self.is_task_completed(task_id)
if completed:
return data
else:
sleep(0.1)
time += 0.1
else:
while not self.exit_signal.is_set():
self.update_current_tasks()
if task_id in self.current_tasks:
completed, data = self.is_task_completed(task_id)
if completed:
return data
else:
sleep(0.1)
else:
return None
return None
def update_available_ports(self):
"""
Update to get next available port to communicate on.
"""
self.task_port += 2
self.task_completion_port += 2
def has_connection(self):
"""
Returns whether this MasterNode has any slave nodes connected
"""
while self.has_connection_input.poll():
self.has_connection_tf = self.has_connection_input.recv()
return self.has_connection_tf
def get_host(self, id):
"""
Read the host from the id.
"""
return id.split(":")[0]
def get_port(self, id):
"""
Read the port from the id
"""
return id.split(":")[1]
def manage_node_status(self, nodes):
"""
If a host has disconnected, remove it from the known hosts list
"""
while self.scanner_input.poll():
data = self.scanner_input.recv()
if isinstance(data, NewNodeMessage):
new_node_compiled = str(data.get_addr()[0]) + ":" + str(data.get_task_port())
if new_node_compiled not in nodes:
nodes[new_node_compiled] = {"num_disconnects" : 0, "task" : None}
self.has_connection_output.send(True)
logging.info("Identified new node at " + new_node_compiled)
elif isinstance(data, NodeInitializeMessage):
address = str(data.get_addr()[0])
for node in nodes:
if address in node:
nodes[node]["cpu_count"] = data.get_cpu_count()
logging.info("Got CPU count from node with address of " + address + ": " + str(nodes[node]["cpu_count"]))
break
for node, node_info in nodes.items():
if node_info["num_disconnects"] >= self.max_disconnect_errors:
logging.info("Connection with " + node + " has timed out...disconnecting from slave node")
new_nodes = {}
for inner_node, inner_node_info in nodes.items():
if inner_node != node:
new_nodes[inner_node] = inner_node_info
nodes = new_nodes
if nodes == {}:
self.has_connection_output.send(False)
return nodes
def wait_for_connection(self, timeout=-1):
"""
Block the current thread until there is a slave node to send tasks to
"""
logging.info("Waiting for a connection")
if timeout > 0:
time = 0
while self.has_connection() == False and time < timeout:
sleep(0.1)
time += 0.1
else:
while self.has_connection() == False and not self.exit_signal.is_set():
sleep(0.1)
def send_task(self, data):
"""
Distribute a task to a slave node.
"""
if not self.has_connection():
logging.warning("No nodes are connected/available to send a task to...task will be queued until a node is available/connected")
new_task = Task(task_id=generate_task_id(), return_port=self.task_completion_port, data=data)
self.send_tasks_queue.put(new_task)
return new_task.get_task_id()
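# A minimal usage sketch of the task-distribution API above, assuming the
# enclosing class is named MasterNode and takes no constructor arguments;
# the task payload shown is illustrative and must match what the slave-side
# worker expects.
if __name__ == "__main__":
    master = MasterNode()
    # Block until at least one slave node has registered itself.
    master.wait_for_connection(timeout=30)
    # Queue a task for distribution and remember its id.
    task_id = master.send_task({"operation": "square", "value": 7})
    # Wait up to 60 seconds for the slave to report the completed task back.
    completed = master.wait_for_task_completion(task_id, timeout=60)
    if completed is not None:
        print("Task " + str(completed.get_task_id()) + " completed")
    else:
        print("Task " + str(task_id) + " did not complete in time")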
|
MainFacadeService.py
|
# -*- coding: utf-8 -*-
import socket
import sys
import time
from threading import Thread
from bottle import response, request
from pip_services3_commons.config import ConfigParams
from pip_services3_commons.errors import ConfigException
from pip_services3_commons.refer import IReferences
from pip_services3_commons.run import IOpenable
from pip_services3_components.auth import CredentialParams, CredentialResolver
from pip_services3_components.connect import ConnectionParams, ConnectionResolver
from pip_services3_rpc.services import SSLCherryPyServer
from .FacadeService import FacadeService
class MainFacadeService(FacadeService, IOpenable):
_default_config = ConfigParams.from_tuples(
'root_path', '',
'connection.protocol', 'http',
'connection.hostname', '0.0.0.0',
'connection.port', 8080,
'credential.ssl_key_file', None,
'credential.ssl_crt_file', None,
'credential.ssl_ca_file', None,
'options.debug', True,
'options.maintenance_enabled', False,
'options.max_sockets', 50,
'options.max_req_size', '1mb'
)
__server = None
__service = None
__http = None
__connection_resolver = ConnectionResolver()
__credential_resolver = CredentialResolver()
__debug = True
__maintance_enabled = False
__max_sockets = 50
__max_req_size = '1mb'
def __init__(self):
super(MainFacadeService, self).__init__()
self._root_path = ''
# bottle app
self.__service = super()._partition
def is_maintance_enabled(self) -> bool:
return self.__maintance_enabled
def set_maintance_enabled(self, value: bool):
self.__maintance_enabled = value
def configure(self, config):
config = config.set_defaults(MainFacadeService._default_config)
self.__connection_resolver.configure(config)
self.__credential_resolver.configure(config)
self._root_path = config.get_as_string_with_default('root_path', self._root_path)
if len(self._root_path) > 0 and not (self._root_path.startswith('/')):
self._root_path = '/' + self._root_path
self.__debug = config.get_as_boolean_with_default('options.debug', self.__debug)
self.__maintance_enabled = config.get_as_boolean_with_default('options.maintenance_enabled',
self.__maintance_enabled)
self.__max_sockets = config.get_as_integer_with_default('options.max_sockets', self.__max_sockets)
self.__max_req_size = config.get_as_string_with_default('options.max_req_size', self.__max_req_size)
def set_references(self, references):
super().set_references(references)
self.__connection_resolver.set_references(references)
self.__credential_resolver.set_references(references)
def is_open(self):
return self.__http is not None
def open(self, correlation_id):
if self.__http is not None:
return
connection = self._get_connection(correlation_id)
credential = self._get_credential(correlation_id, connection)
self.__server = self.__create_server(connection, credential)
self.__configure_service()
host = connection.get_host()
host_name = socket.gethostname()
port = connection.get_port()
self.__server.host = host
self.__server.port = port
def start_server():
try:
self.__service.run(server=self.__server, debug=self.__debug)
except Exception as ex:
self._logger.error(correlation_id, ex, 'Failed to start HTTP server at {}:{}', host_name, port)
# Start server in thread
Thread(target=start_server, daemon=True).start()
# Give the server a moment to start
time.sleep(0.01)
self._logger.info(correlation_id, 'Started HTTP server {}:{}', host_name, port)
def close(self, correlation_id):
try:
if self.__server is not None:
self.__server.shutdown()
self.__service.close()
self._logger.debug(correlation_id, "Closed HTTP server")
self.__server = None
self.__service = None
except Exception as ex:
self._logger.warn(correlation_id, "Failed while closing HTTP server: " + str(ex))
def _get_connection(self, correlation_id):
connection = self.__connection_resolver.resolve(correlation_id)
# Check for connection
if connection is None:
raise ConfigException(correlation_id, "NO_CONNECTION", "Connection for REST client is not defined")
else:
# Check for type
protocol = connection.get_protocol('http')
if 'http' != protocol and 'https' != protocol:
raise ConfigException(
correlation_id, "WRONG_PROTOCOL", "Protocol is not supported by REST connection").with_details(
"protocol", protocol)
# Check for host
elif connection.get_host() is None:
raise ConfigException(correlation_id, "NO_HOST", "No host is configured in REST connection")
# Check for port
elif connection.get_port() == 0:
raise ConfigException(correlation_id, "NO_PORT", "No port is configured in REST connection")
return connection
def _get_credential(self, correlation_id, connection):
# Credentials are not required unless HTTPS is used
if connection.get_protocol('http') != 'https':
return
credential = self.__credential_resolver.lookup(correlation_id)
# Check for connection
if credential is None:
raise ConfigException(correlation_id, "NO_CREDENTIAL",
"SSL certificates are not configured for HTTPS protocol")
else:
if credential.get_as_nullable_string('ssl_key_file') is None:
raise ConfigException(correlation_id, "NO_SSL_KEY_FILE",
"SSL key file is not configured in credentials")
elif credential.get_as_nullable_string('ssl_crt_file') is None:
raise ConfigException(correlation_id, "NO_SSL_CRT_FILE",
"SSL crt file is not configured in credentials")
return credential
def __create_server(self, connection, credential):
if connection.get_protocol('http') == 'https':
ssl_key_file = credential.get_as_nullable_string('ssl_key_file')
with open(ssl_key_file, 'rb') as file:
private_key = file.read()
ssl_crt_file = credential.get_as_nullable_string('ssl_crt_file')
with open(ssl_crt_file, 'rb') as file:
certfile = file.read()
# ca = []
#
# ssl_ca_file = credential.get_as_nullable_string('ssl_ca_file')
# if ssl_ca_file is not None:
# with open(ssl_ca_file, 'rb') as file:
# ca_text = file.read()
# while ca_text is not None and len(ca_text.strip()) > 0:
# crt_index = ca_text.rindex(b'-----BEGIN CERTIFICATE-----')
# if crt_index > -1:
# ca.append(ca_text[crt_index:])
# ca_text = ca_text[0:crt_index]
return SSLCherryPyServer(certfile=certfile,
keyfile=private_key,
request_queue_size=self.__max_sockets,
max_request_body_size=self.__max_req_size)
return SSLCherryPyServer(request_queue_size=self.__max_sockets,
max_request_body_size=self.__max_req_size)
def __configure_service(self):
self.__service.config['catchall'] = True
self.__service.config['autojson'] = True
# Enable CORS requests
self.__service.add_hook('after_request', self.__enable_cors)
self.__service.add_hook('after_request', self.__do_maintance)
self.__service.add_hook('after_request', self.__no_cache)
def __enable_cors(self):
response.headers['Access-Control-Max-Age'] = '5'
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
response.headers[
'Access-Control-Allow-Headers'] = 'Authorization, Origin, Accept, Content-Type, X-Requested-With'
def __do_maintance(self):
"""
:return: maintenance error code
"""
# Make this more sophisticated
if self.__maintance_enabled:
response.headers['Retry-After'] = 3600
response.status = 503
def __no_cache(self):
"""
Prevents IE from caching REST requests
"""
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = 0
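# A minimal usage sketch, assuming the facade can start with only the default
# configuration and without set_references() being called; a real deployment
# would register its component references first. The correlation id 'demo' is
# arbitrary.
if __name__ == "__main__":
    facade = MainFacadeService()
    facade.configure(ConfigParams.from_tuples(
        'connection.protocol', 'http',
        'connection.hostname', '0.0.0.0',
        'connection.port', 8080,
        'options.debug', True
    ))
    facade.open('demo')
    try:
        while facade.is_open():
            time.sleep(1)  # keep the main thread alive while the server runs
    except KeyboardInterrupt:
        facade.close('demo')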
|
fn_api_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
import collections
import copy
import logging
import Queue as queue
import re
import threading
import time
from concurrent import futures
import grpc
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam import metrics
from apache_beam.coders import WindowedValueCoder
from apache_beam.coders import registry
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.internal import pickler
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import proto_utils
# This module is experimental. No backwards-compatibility guarantees.
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
_DONE = object()
def __init__(self):
self._push_queue = queue.Queue()
self._futures_by_id = dict()
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._started = False
self._uid_counter = 0
def Control(self, iterator, context):
self._inputs = iterator
# Note: We only support one client for now.
self._read_thread.start()
self._started = True
while True:
to_push = self._push_queue.get()
if to_push is self._DONE:
return
yield to_push
def _read(self):
for data in self._inputs:
self._futures_by_id.pop(data.instruction_id).set(data)
def push(self, item):
if item is self._DONE:
future = None
else:
if not item.instruction_id:
self._uid_counter += 1
item.instruction_id = 'control_%s' % self._uid_counter
future = ControlFuture(item.instruction_id)
self._futures_by_id[item.instruction_id] = future
self._push_queue.put(item)
return future
def done(self):
self.push(self._DONE)
# Can't join a thread before it's started.
while not self._started:
time.sleep(.01)
self._read_thread.join()
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(list)
self._windowing = windowing
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing
else windowed_key_value.with_value(value))
def __iter__(self):
output_stream = create_OutputStream()
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(None).with_value
windowed_key_values = lambda key, values: [globally_window((key, values))]
else:
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for encoded_key, windowed_values in self._table.items():
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream, True)
return iter([output_stream.get()])
class _WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(self, side_input_data):
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
assert side_input_data.access_pattern == common_urns.ITERABLE_SIDE_INPUT
self._windowed_value_coder = side_input_data.coder
self._window_coder = side_input_data.coder.window_coder
self._value_coder = side_input_data.coder.wrapped_value_coder
self._values_by_window = collections.defaultdict(list)
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_value = self._windowed_value_coder.get_impl(
).decode_from_stream(input_stream, True)
for window in windowed_value.windows:
self._values_by_window[window].append(windowed_value.value)
def items(self):
value_coder_impl = self._value_coder.get_impl()
for window, values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_window, output_stream.get()
class FnApiRunner(runner.PipelineRunner):
def __init__(self, use_grpc=False, sdk_harness_factory=None):
"""Creates a new Fn API Runner.
Args:
use_grpc: whether to use grpc or simply make in-process calls;
defaults to False
sdk_harness_factory: callable used to instantiate customized sdk harnesses;
typically not set by users
"""
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._use_grpc = use_grpc
if sdk_harness_factory and not use_grpc:
raise ValueError('GRPC must be used if a harness factory is provided.')
self._sdk_harness_factory = sdk_harness_factory
self._progress_frequency = None
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run_pipeline(self, pipeline):
MetricsEnvironment.set_metrics_supported(False)
# This is sometimes needed if type checking is disabled
# to enforce that the inputs (and outputs) of GroupByKey operations
# are known to be KVs.
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
pipeline.visit(DataflowRunner.group_by_key_input_visitor())
return self.run_via_runner_api(pipeline.to_runner_api())
def run_via_runner_api(self, pipeline_proto):
return self.run_stages(*self.create_stages(pipeline_proto))
def create_stages(self, pipeline_proto):
# First define a couple of helpers.
def union(a, b):
# Minimize the number of distinct sets.
if not a or a == b:
return b
elif not b:
return a
else:
return frozenset.union(a, b)
class Stage(object):
"""A set of Transforms that can be sent to the worker for processing."""
def __init__(self, name, transforms,
downstream_side_inputs=None, must_follow=frozenset()):
self.name = name
self.transforms = transforms
self.downstream_side_inputs = downstream_side_inputs
self.must_follow = must_follow
def __repr__(self):
must_follow = ', '.join(prev.name for prev in self.must_follow)
downstream_side_inputs = ', '.join(
str(si) for si in self.downstream_side_inputs)
return "%s\n %s\n must follow: %s\n downstream_side_inputs: %s" % (
self.name,
'\n'.join(["%s:%s" % (transform.unique_name, transform.spec.urn)
for transform in self.transforms]),
must_follow,
downstream_side_inputs)
def can_fuse(self, consumer):
def no_overlap(a, b):
return not a.intersection(b)
return (
not self in consumer.must_follow
and not self.is_flatten() and not consumer.is_flatten()
and no_overlap(self.downstream_side_inputs, consumer.side_inputs()))
def fuse(self, other):
return Stage(
"(%s)+(%s)" % (self.name, other.name),
self.transforms + other.transforms,
union(self.downstream_side_inputs, other.downstream_side_inputs),
union(self.must_follow, other.must_follow))
def is_flatten(self):
return any(transform.spec.urn == common_urns.FLATTEN_TRANSFORM
for transform in self.transforms)
def side_inputs(self):
for transform in self.transforms:
if transform.spec.urn == common_urns.PARDO_TRANSFORM:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for side_input in payload.side_inputs:
yield transform.inputs[side_input]
def has_as_main_input(self, pcoll):
for transform in self.transforms:
if transform.spec.urn == common_urns.PARDO_TRANSFORM:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
local_side_inputs = payload.side_inputs
else:
local_side_inputs = {}
for local_id, pipeline_id in transform.inputs.items():
if pcoll == pipeline_id and local_id not in local_side_inputs:
return True
def deduplicate_read(self):
seen_pcolls = set()
new_transforms = []
for transform in self.transforms:
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
pcoll = only_element(transform.outputs.items())[1]
if pcoll in seen_pcolls:
continue
seen_pcolls.add(pcoll)
new_transforms.append(transform)
self.transforms = new_transforms
# Now define the "optimization" phases.
safe_coders = {}
def lift_combiners(stages):
"""Expands CombinePerKey into pre- and post-grouping stages.
... -> CombinePerKey -> ...
becomes
... -> PreCombine -> GBK -> MergeAccumulators -> ExtractOutput -> ...
"""
def add_or_get_coder_id(coder_proto):
for coder_id, coder in pipeline_components.coders.items():
if coder == coder_proto:
return coder_id
new_coder_id = unique_name(pipeline_components.coders, 'coder')
pipeline_components.coders[new_coder_id].CopyFrom(coder_proto)
return new_coder_id
def windowed_coder_id(coder_id):
proto = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.WINDOWED_VALUE_CODER)),
component_coder_ids=[coder_id, window_coder_id])
return add_or_get_coder_id(proto)
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == common_urns.COMBINE_PER_KEY_TRANSFORM:
combine_payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.CombinePayload)
input_pcoll = pipeline_components.pcollections[only_element(
transform.inputs.values())]
output_pcoll = pipeline_components.pcollections[only_element(
transform.outputs.values())]
windowed_input_coder = pipeline_components.coders[
input_pcoll.coder_id]
element_coder_id, window_coder_id = (
windowed_input_coder.component_coder_ids)
element_coder = pipeline_components.coders[element_coder_id]
key_coder_id, _ = element_coder.component_coder_ids
accumulator_coder_id = combine_payload.accumulator_coder_id
key_accumulator_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.KV_CODER)),
component_coder_ids=[key_coder_id, accumulator_coder_id])
key_accumulator_coder_id = add_or_get_coder_id(key_accumulator_coder)
accumulator_iter_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.ITERABLE_CODER)),
component_coder_ids=[accumulator_coder_id])
accumulator_iter_coder_id = add_or_get_coder_id(
accumulator_iter_coder)
key_accumulator_iter_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.KV_CODER)),
component_coder_ids=[key_coder_id, accumulator_iter_coder_id])
key_accumulator_iter_coder_id = add_or_get_coder_id(
key_accumulator_iter_coder)
precombined_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[precombined_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Precombine.out',
coder_id=windowed_coder_id(key_accumulator_coder_id),
windowing_strategy_id=input_pcoll.windowing_strategy_id,
is_bounded=input_pcoll.is_bounded))
grouped_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[grouped_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Group.out',
coder_id=windowed_coder_id(key_accumulator_iter_coder_id),
windowing_strategy_id=output_pcoll.windowing_strategy_id,
is_bounded=output_pcoll.is_bounded))
merged_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[merged_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Merge.out',
coder_id=windowed_coder_id(key_accumulator_coder_id),
windowing_strategy_id=output_pcoll.windowing_strategy_id,
is_bounded=output_pcoll.is_bounded))
def make_stage(base_stage, transform):
return Stage(
transform.unique_name,
[transform],
downstream_side_inputs=base_stage.downstream_side_inputs,
must_follow=base_stage.must_follow)
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Precombine',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.COMBINE_PGBKCV_TRANSFORM,
payload=transform.spec.payload),
inputs=transform.inputs,
outputs={'out': precombined_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Group',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.GROUP_BY_KEY_TRANSFORM),
inputs={'in': precombined_pcoll_id},
outputs={'out': grouped_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Merge',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.COMBINE_MERGE_ACCUMULATORS_TRANSFORM,
payload=transform.spec.payload),
inputs={'in': grouped_pcoll_id},
outputs={'out': merged_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/ExtractOutputs',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.COMBINE_EXTRACT_OUTPUTS_TRANSFORM,
payload=transform.spec.payload),
inputs={'in': merged_pcoll_id},
outputs=transform.outputs))
else:
yield stage
def expand_gbk(stages):
"""Transforms each GBK into a write followed by a read.
"""
good_coder_urns = set(
value for key, value in common_urns.__dict__.items()
if re.match('[A-Z][A-Z_]*$', key))
coders = pipeline_components.coders
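# Find an existing bytes coder to reuse; the for-else below registers a new
# one only if none is present in the pipeline components.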
for coder_id, coder_proto in coders.items():
if coder_proto.spec.spec.urn == common_urns.BYTES_CODER:
bytes_coder_id = coder_id
break
else:
bytes_coder_id = unique_name(coders, 'bytes_coder')
pipeline_components.coders[bytes_coder_id].CopyFrom(
beam.coders.BytesCoder().to_runner_api(None))
coder_substitutions = {}
def wrap_unknown_coders(coder_id, with_bytes):
if (coder_id, with_bytes) not in coder_substitutions:
wrapped_coder_id = None
coder_proto = coders[coder_id]
if coder_proto.spec.spec.urn == common_urns.LENGTH_PREFIX_CODER:
coder_substitutions[coder_id, with_bytes] = (
bytes_coder_id if with_bytes else coder_id)
elif coder_proto.spec.spec.urn in good_coder_urns:
wrapped_components = [wrap_unknown_coders(c, with_bytes)
for c in coder_proto.component_coder_ids]
if wrapped_components == list(coder_proto.component_coder_ids):
# Use as is.
coder_substitutions[coder_id, with_bytes] = coder_id
else:
wrapped_coder_id = unique_name(
coders,
coder_id + ("_bytes" if with_bytes else "_len_prefix"))
coders[wrapped_coder_id].CopyFrom(coder_proto)
coders[wrapped_coder_id].component_coder_ids[:] = [
wrap_unknown_coders(c, with_bytes)
for c in coder_proto.component_coder_ids]
coder_substitutions[coder_id, with_bytes] = wrapped_coder_id
else:
# Not a known coder.
if with_bytes:
coder_substitutions[coder_id, with_bytes] = bytes_coder_id
else:
wrapped_coder_id = unique_name(coders, coder_id + "_len_prefix")
len_prefix_coder_proto = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.LENGTH_PREFIX_CODER)),
component_coder_ids=[coder_id])
coders[wrapped_coder_id].CopyFrom(len_prefix_coder_proto)
coder_substitutions[coder_id, with_bytes] = wrapped_coder_id
# This operation is idempotent.
if wrapped_coder_id:
coder_substitutions[wrapped_coder_id, with_bytes] = wrapped_coder_id
return coder_substitutions[coder_id, with_bytes]
def fix_pcoll_coder(pcoll):
new_coder_id = wrap_unknown_coders(pcoll.coder_id, False)
safe_coders[new_coder_id] = wrap_unknown_coders(pcoll.coder_id, True)
pcoll.coder_id = new_coder_id
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == common_urns.GROUP_BY_KEY_TRANSFORM:
for pcoll_id in transform.inputs.values():
fix_pcoll_coder(pipeline_components.pcollections[pcoll_id])
for pcoll_id in transform.outputs.values():
fix_pcoll_coder(pipeline_components.pcollections[pcoll_id])
# This is used later to correlate the read and write.
param = str("group:%s" % stage.name)
if stage.name not in pipeline_components.transforms:
pipeline_components.transforms[stage.name].CopyFrom(transform)
gbk_write = Stage(
transform.unique_name + '/Write',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write',
inputs=transform.inputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=param))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
yield gbk_write
yield Stage(
transform.unique_name + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=param))],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=union(frozenset([gbk_write]), stage.must_follow))
else:
yield stage
def sink_flattens(stages):
"""Sink flattens and remove them from the graph.
A flatten that cannot be sunk/fused away becomes multiple writes (to the
same logical sink) followed by a read.
"""
# TODO(robertwb): Actually attempt to sink rather than always materialize.
# TODO(robertwb): Possibly fuse this into one of the stages.
pcollections = pipeline_components.pcollections
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == common_urns.FLATTEN_TRANSFORM:
# This is used later to correlate the read and writes.
param = str("materialize:%s" % transform.unique_name)
output_pcoll_id, = transform.outputs.values()
output_coder_id = pcollections[output_pcoll_id].coder_id
flatten_writes = []
for local_in, pcoll_in in transform.inputs.items():
if pcollections[pcoll_in].coder_id != output_coder_id:
# Flatten inputs must all be written with the same coder as is
# used to read them.
pcollections[pcoll_in].coder_id = output_coder_id
transcoded_pcollection = (
transform.unique_name + '/Transcode/' + local_in + '/out')
yield Stage(
transform.unique_name + '/Transcode/' + local_in,
[beam_runner_api_pb2.PTransform(
unique_name=
transform.unique_name + '/Transcode/' + local_in,
inputs={local_in: pcoll_in},
outputs={'out': transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.IDENTITY_DOFN_URN))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
pcollections[transcoded_pcollection].CopyFrom(
pcollections[pcoll_in])
pcollections[transcoded_pcollection].coder_id = output_coder_id
else:
transcoded_pcollection = pcoll_in
flatten_write = Stage(
transform.unique_name + '/Write/' + local_in,
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write/' + local_in,
inputs={local_in: transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=param))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
flatten_writes.append(flatten_write)
yield flatten_write
yield Stage(
transform.unique_name + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=param))],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=union(frozenset(flatten_writes), stage.must_follow))
else:
yield stage
def annotate_downstream_side_inputs(stages):
"""Annotate each stage with fusion-prohibiting information.
Each stage is annotated with the (transitive) set of pcollections that
depend on this stage that are also used later in the pipeline as a
side input.
While theoretically this could result in O(n^2) annotations, the size of
each set is bounded by the number of side inputs (typically much smaller
than the number of total nodes) and the number of *distinct* side-input
sets is also generally small (and shared due to the use of union
defined above).
This representation is also amenable to simple recomputation on fusion.
"""
consumers = collections.defaultdict(list)
all_side_inputs = set()
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers[input].append(stage)
for si in stage.side_inputs():
all_side_inputs.add(si)
all_side_inputs = frozenset(all_side_inputs)
downstream_side_inputs_by_stage = {}
def compute_downstream_side_inputs(stage):
if stage not in downstream_side_inputs_by_stage:
downstream_side_inputs = frozenset()
for transform in stage.transforms:
for output in transform.outputs.values():
if output in all_side_inputs:
downstream_side_inputs = union(
downstream_side_inputs, frozenset([output]))
for consumer in consumers[output]:
downstream_side_inputs = union(
downstream_side_inputs,
compute_downstream_side_inputs(consumer))
downstream_side_inputs_by_stage[stage] = downstream_side_inputs
return downstream_side_inputs_by_stage[stage]
for stage in stages:
stage.downstream_side_inputs = compute_downstream_side_inputs(stage)
return stages
def greedily_fuse(stages):
"""Places transforms sharing an edge in the same stage, whenever possible.
"""
producers_by_pcoll = {}
consumers_by_pcoll = collections.defaultdict(list)
# Used to always reference the correct stage as the producer and
# consumer maps are not updated when stages are fused away.
replacements = {}
def replacement(s):
old_ss = []
while s in replacements:
old_ss.append(s)
s = replacements[s]
for old_s in old_ss[:-1]:
replacements[old_s] = s
return s
def fuse(producer, consumer):
fused = producer.fuse(consumer)
replacements[producer] = fused
replacements[consumer] = fused
# First record the producers and consumers of each PCollection.
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers_by_pcoll[input].append(stage)
for output in transform.outputs.values():
producers_by_pcoll[output] = stage
logging.debug('consumers\n%s', consumers_by_pcoll)
logging.debug('producers\n%s', producers_by_pcoll)
# Now try to fuse away all pcollections.
for pcoll, producer in producers_by_pcoll.items():
pcoll_as_param = str("materialize:%s" % pcoll)
write_pcoll = None
for consumer in consumers_by_pcoll[pcoll]:
producer = replacement(producer)
consumer = replacement(consumer)
# Update consumer.must_follow set, as it's used in can_fuse.
consumer.must_follow = frozenset(
replacement(s) for s in consumer.must_follow)
if producer.can_fuse(consumer):
fuse(producer, consumer)
else:
# If we can't fuse, do a read + write.
if write_pcoll is None:
write_pcoll = Stage(
pcoll + '/Write',
[beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Write',
inputs={'in': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=pcoll_as_param))])
fuse(producer, write_pcoll)
if consumer.has_as_main_input(pcoll):
read_pcoll = Stage(
pcoll + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Read',
outputs={'out': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=pcoll_as_param))],
must_follow=frozenset([write_pcoll]))
fuse(read_pcoll, consumer)
else:
consumer.must_follow = union(
consumer.must_follow, frozenset([write_pcoll]))
# Everything that was originally a stage or a replacement, but wasn't
# replaced, should be in the final graph.
final_stages = frozenset(stages).union(replacements.values()).difference(
replacements.keys())
for stage in final_stages:
# Update all references to their final values before throwing
# the replacement data away.
stage.must_follow = frozenset(replacement(s) for s in stage.must_follow)
# Two reads of the same PCollection may have been fused into this stage; the duplicate reads are unneeded.
stage.deduplicate_read()
return final_stages
def sort_stages(stages):
"""Order stages suitable for sequential execution.
"""
seen = set()
ordered = []
def process(stage):
if stage not in seen:
seen.add(stage)
for prev in stage.must_follow:
process(prev)
ordered.append(stage)
for stage in stages:
process(stage)
return ordered
# Now actually apply the operations.
pipeline_components = copy.deepcopy(pipeline_proto.components)
# Reify coders.
# TODO(BEAM-2717): Remove once Coders are already in proto.
coders = pipeline_context.PipelineContext(pipeline_components).coders
for pcoll in pipeline_components.pcollections.values():
if pcoll.coder_id not in coders:
window_coder = coders[
pipeline_components.windowing_strategies[
pcoll.windowing_strategy_id].window_coder_id]
coder = WindowedValueCoder(
registry.get_coder(pickler.loads(pcoll.coder_id)),
window_coder=window_coder)
pcoll.coder_id = coders.get_id(coder)
coders.populate_map(pipeline_components.coders)
known_composites = set(
[common_urns.GROUP_BY_KEY_TRANSFORM,
common_urns.COMBINE_PER_KEY_TRANSFORM])
def leaf_transforms(root_ids):
for root_id in root_ids:
root = pipeline_proto.components.transforms[root_id]
if root.spec.urn in known_composites:
yield root_id
elif not root.subtransforms:
# Make sure its outputs are not a subset of its inputs.
if set(root.outputs.values()) - set(root.inputs.values()):
yield root_id
else:
for leaf in leaf_transforms(root.subtransforms):
yield leaf
# Initial set of stages are singleton leaf transforms.
stages = [
Stage(name, [pipeline_proto.components.transforms[name]])
for name in leaf_transforms(pipeline_proto.root_transform_ids)]
# Apply each phase in order.
for phase in [
annotate_downstream_side_inputs, lift_combiners, expand_gbk,
sink_flattens, greedily_fuse, sort_stages]:
logging.info('%s %s %s', '=' * 20, phase, '=' * 20)
stages = list(phase(stages))
logging.debug('Stages: %s', [str(s) for s in stages])
# Return the (possibly mutated) context and ordered set of stages.
return pipeline_components, stages, safe_coders
def run_stages(self, pipeline_components, stages, safe_coders):
if self._use_grpc:
controller = FnApiRunner.GrpcController(self._sdk_harness_factory)
else:
controller = FnApiRunner.DirectController()
metrics_by_stage = {}
try:
pcoll_buffers = collections.defaultdict(list)
for stage in stages:
metrics_by_stage[stage.name] = self.run_stage(
controller, pipeline_components, stage,
pcoll_buffers, safe_coders).process_bundle.metrics
finally:
controller.close()
return RunnerResult(runner.PipelineState.DONE, metrics_by_stage)
def run_stage(
self, controller, pipeline_components, stage, pcoll_buffers, safe_coders):
context = pipeline_context.PipelineContext(pipeline_components)
data_operation_spec = controller.data_operation_spec()
def extract_endpoints(stage):
# Returns maps of transform names to PCollection identifiers.
# Also mutates IO stages to point to the data_operation_spec.
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
data_input[target] = pcoll_buffers[pcoll_id]
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
else:
raise NotImplementedError
if data_operation_spec:
transform.spec.payload = data_operation_spec.SerializeToString()
else:
transform.spec.payload = ""
elif transform.spec.urn == common_urns.PARDO_TRANSFORM:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
'materialize:' + transform.inputs[tag],
beam.pvalue.SideInputData.from_runner_api(si, None))
return data_input, data_side_input, data_output
logging.info('Running %s', stage.name)
logging.debug(' %s', stage)
data_input, data_side_input, data_output = extract_endpoints(stage)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={transform.unique_name: transform
for transform in stage.transforms},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
# Store the required side inputs into state.
for (transform_id, tag), (pcoll_id, si) in data_side_input.items():
elements_by_window = _WindowGroupingBuffer(si)
for element_data in pcoll_buffers[pcoll_id]:
elements_by_window.append(element_data)
for window, elements_data in elements_by_window.items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
ptransform_id=transform_id,
side_input_id=tag,
window=window))
controller.state_handler.blocking_append(state_key, elements_data, None)
def get_buffer(pcoll_id):
if pcoll_id.startswith('materialize:'):
if pcoll_id not in pcoll_buffers:
# Just store the data chunks for replay.
pcoll_buffers[pcoll_id] = list()
elif pcoll_id.startswith('group:'):
# This is a grouping write, create a grouping buffer if needed.
if pcoll_id not in pcoll_buffers:
original_gbk_transform = pcoll_id.split(':', 1)[1]
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(transform_proto.inputs.values())
output_pcoll = only_element(transform_proto.outputs.values())
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components
.pcollections[output_pcoll].windowing_strategy_id]
pcoll_buffers[pcoll_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
# These should be the only two identifiers we produce for now,
# but special side input writes may go here.
raise NotImplementedError(pcoll_id)
return pcoll_buffers[pcoll_id]
return BundleManager(
controller, get_buffer, process_bundle_descriptor,
self._progress_frequency).process_bundle(data_input, data_output)
# These classes are used to interact with the worker.
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self):
self._lock = threading.Lock()
self._state = collections.defaultdict(list)
def blocking_get(self, state_key, instruction_reference=None):
with self._lock:
return ''.join(self._state[self._to_key(state_key)])
def blocking_append(self, state_key, data, instruction_reference=None):
with self._lock:
self._state[self._to_key(state_key)].append(data)
def blocking_clear(self, state_key, instruction_reference=None):
with self._lock:
del self._state[self._to_key(state_key)]
@staticmethod
def _to_key(state_key):
return state_key.SerializeToString()
class GrpcStateServicer(
StateServicer, beam_fn_api_pb2_grpc.BeamFnStateServicer):
def State(self, request_stream, context=None):
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_reference.
for request in request_stream:
if request.get:
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=self.blocking_get(request.state_key)))
elif request.append:
self.blocking_append(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
append=beam_fn_api_pb2.AppendResponse())
elif request.clear:
self.blocking_clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
clear=beam_fn_api_pb2.ClearResponse())
class DirectController(object):
"""An in-memory controller for fn API control, state and data planes."""
def __init__(self):
self.state_handler = FnApiRunner.StateServicer()
self.control_handler = self
self.data_plane_handler = data_plane.InMemoryDataChannel()
self.worker = sdk_worker.SdkWorker(
self.state_handler, data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()), {})
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
logging.debug('CONTROL REQUEST %s', request)
response = self.worker.do_instruction(request)
logging.debug('CONTROL RESPONSE %s', response)
return ControlFuture(request.instruction_id, response)
def done(self):
pass
def close(self):
pass
def data_operation_spec(self):
return None
class GrpcController(object):
"""An grpc based controller for fn API control, state and data planes."""
def __init__(self, sdk_harness_factory=None):
self.sdk_harness_factory = sdk_harness_factory
self.control_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.data_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer()
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
self.data_plane_handler = data_plane.GrpcServerDataChannel()
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
# TODO(robertwb): Is sharing the control channel fine? Alternatively,
# how should this be plumbed?
self.state_handler = FnApiRunner.GrpcStateServicer()
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
self.state_handler, self.control_server)
logging.info('starting control server on port %s', self.control_port)
logging.info('starting data server on port %s', self.data_port)
self.data_server.start()
self.control_server.start()
self.worker = self.sdk_harness_factory(
'localhost:%s' % self.control_port
) if self.sdk_harness_factory else sdk_worker.SdkHarness(
'localhost:%s' % self.control_port, worker_count=1)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
logging.info('starting worker')
self.worker_thread.start()
def data_operation_spec(self):
url = 'localhost:%s' % self.data_port
remote_grpc_port = beam_fn_api_pb2.RemoteGrpcPort()
remote_grpc_port.api_service_descriptor.url = url
return remote_grpc_port
def close(self):
self.control_handler.done()
self.worker_thread.join()
self.data_plane_handler.close()
self.control_server.stop(5).wait()
self.data_server.stop(5).wait()
class BundleManager(object):
_uid_counter = 0
def __init__(
self, controller, get_buffer, bundle_descriptor, progress_frequency=None):
self._controller = controller
self._get_buffer = get_buffer
self._bundle_descriptor = bundle_descriptor
self._registered = False
self._progress_frequency = progress_frequency
def process_bundle(self, inputs, expected_outputs):
# Unique id for the instruction processing this bundle.
BundleManager._uid_counter += 1
process_bundle_id = 'bundle_%s' % BundleManager._uid_counter
# Register the bundle descriptor, if needed.
if not self._registered:
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[self._bundle_descriptor]))
self._controller.control_handler.push(process_bundle_registration)
self._registered = True
# Write all the input data to the channel.
for (transform_id, name), elements in inputs.items():
data_out = self._controller.data_plane_handler.output_stream(
process_bundle_id, beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id, name=name))
for element_data in elements:
data_out.write(element_data)
data_out.close()
# Actually start the bundle.
process_bundle = beam_fn_api_pb2.InstructionRequest(
instruction_id=process_bundle_id,
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_reference=self._bundle_descriptor.id))
result_future = self._controller.control_handler.push(process_bundle)
with ProgressRequester(
self._controller, process_bundle_id, self._progress_frequency):
# Gather all output data.
expected_targets = [
beam_fn_api_pb2.Target(primitive_transform_reference=transform_id,
name=output_name)
for (transform_id, output_name), _ in expected_outputs.items()]
logging.debug('Gather all output data from %s.', expected_targets)
for output in self._controller.data_plane_handler.input_elements(
process_bundle_id, expected_targets):
target_tuple = (
output.target.primitive_transform_reference, output.target.name)
if target_tuple in expected_outputs:
self._get_buffer(expected_outputs[target_tuple]).append(output.data)
logging.debug('Wait for the bundle to finish.')
result = result_future.get()
if result.error:
raise RuntimeError(result.error)
return result
class ProgressRequester(threading.Thread):
def __init__(self, controller, instruction_id, frequency, callback=None):
super(ProgressRequester, self).__init__()
self._controller = controller
self._instruction_id = instruction_id
self._frequency = frequency
self._done = False
self._latest_progress = None
self._callback = callback
self.daemon = True
def __enter__(self):
if self._frequency:
self.start()
def __exit__(self, *unused_exc_info):
if self._frequency:
self.stop()
def run(self):
while not self._done:
try:
progress_result = self._controller.control_handler.push(
beam_fn_api_pb2.InstructionRequest(
process_bundle_progress=
beam_fn_api_pb2.ProcessBundleProgressRequest(
instruction_reference=self._instruction_id))).get()
self._latest_progress = progress_result.process_bundle_progress
if self._callback:
self._callback(self._latest_progress)
except Exception as exn:
logging.error("Bad progress: %s", exn)
time.sleep(self._frequency)
def stop(self):
self._done = True
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response:
with self._condition:
if not self._response:
self._condition.wait(timeout)
return self._response
class FnApiMetrics(metrics.metric.MetricResults):
def __init__(self, step_metrics):
self._counters = {}
self._distributions = {}
self._gauges = {}
for step_metric in step_metrics.values():
for ptransform_id, ptransform in step_metric.ptransforms.items():
for proto in ptransform.user:
key = metrics.execution.MetricKey(
ptransform_id,
metrics.metricbase.MetricName.from_runner_api(proto.metric_name))
if proto.HasField('counter_data'):
self._counters[key] = proto.counter_data.value
elif proto.HasField('distribution_data'):
self._distributions[
key] = metrics.cells.DistributionResult(
metrics.cells.DistributionData.from_runner_api(
proto.distribution_data))
elif proto.HasField('gauge_data'):
self._gauges[
key] = metrics.cells.GaugeResult(
metrics.cells.GaugeData.from_runner_api(
proto.gauge_data))
def query(self, filter=None):
counters = [metrics.execution.MetricResult(k, v, v)
for k, v in self._counters.items()
if self.matches(filter, k)]
distributions = [metrics.execution.MetricResult(k, v, v)
for k, v in self._distributions.items()
if self.matches(filter, k)]
gauges = [metrics.execution.MetricResult(k, v, v)
for k, v in self._gauges.items()
if self.matches(filter, k)]
return {'counters': counters,
'distributions': distributions,
'gauges': gauges}
class RunnerResult(runner.PipelineResult):
def __init__(self, state, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._metrics_by_stage = metrics_by_stage
self._user_metrics = None
def wait_until_finish(self, duration=None):
return self._state
def metrics(self):
if self._user_metrics is None:
self._user_metrics = FnApiMetrics(self._metrics_by_stage)
return self._user_metrics
def only_element(iterable):
element, = iterable
return element
def unique_name(existing, prefix):
if prefix in existing:
counter = 0
while True:
counter += 1
prefix_counter = prefix + "_%s" % counter
if prefix_counter not in existing:
return prefix_counter
else:
return prefix
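# A minimal usage sketch, assuming the in-process mode (use_grpc=False) and
# that the SDK worker dependencies imported above are available; it includes a
# GroupByKey so that at least one shuffle stage is exercised.
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    pipeline = beam.Pipeline(runner=FnApiRunner())
    _ = (pipeline
         | beam.Create(['to', 'be', 'or', 'not', 'to', 'be'])
         | beam.Map(lambda word: (word, 1))
         | beam.GroupByKey()
         | beam.Map(lambda kv: '%s: %d' % (kv[0], sum(kv[1])))
         | beam.Map(logging.info))
    pipeline.run().wait_until_finish()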
|
feature_extract_dcl.py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
sys.path.append("../")
from libs.networks import build_whole_network_dcl_tsne
from help_utils import tools
from libs.label_name_dict.label_dict import *
from libs.box_utils import draw_box_in_img
from libs.box_utils.coordinate_convert import forward_convert, backward_convert, coordinate_present_convert
from libs.box_utils import nms_rotate
from libs.box_utils.rotate_polygon_nms import rotate_gpu_nms
def worker(gpu_id, images, det_net, args, result_queue):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
# 1. preprocess img
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. not BGR
img_batch = tf.cast(img_plac, tf.float32)
if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
img_batch = (img_batch / 255 - tf.constant(cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
else:
img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0)
detection_scores, detection_category, detection_boxes_angle, detection_boxes_angle_logits = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch_h=None,
gtboxes_batch_r=None,
gt_encode_label=None)
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
restorer, restore_ckpt = det_net.get_restorer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
if not restorer is None:
restorer.restore(sess, restore_ckpt)
print('restore model on gpu %d ...' % gpu_id)
for img_path in images:
# if 'P0968' not in img_path:
# continue
img = cv2.imread(img_path)
box_res_rotate = []
label_res_rotate = []
score_res_rotate = []
logits_res_rotate = []
imgH = img.shape[0]
imgW = img.shape[1]
img_short_side_len_list = cfgs.IMG_SHORT_SIDE_LEN if args.multi_scale else [cfgs.IMG_SHORT_SIDE_LEN]
if imgH < args.h_len:
temp = np.zeros([args.h_len, imgW, 3], np.float32)
temp[0:imgH, :, :] = img
img = temp
imgH = args.h_len
if imgW < args.w_len:
temp = np.zeros([imgH, args.w_len, 3], np.float32)
temp[:, 0:imgW, :] = img
img = temp
imgW = args.w_len
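# Slide an (h_len x w_len) window over the padded image with h_overlap/w_overlap
# pixels of overlap between neighbouring tiles; the last tile in each direction
# is shifted back so it ends exactly at the image border.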
for hh in range(0, imgH, args.h_len - args.h_overlap):
if imgH - hh - 1 < args.h_len:
hh_ = imgH - args.h_len
else:
hh_ = hh
for ww in range(0, imgW, args.w_len - args.w_overlap):
if imgW - ww - 1 < args.w_len:
ww_ = imgW - args.w_len
else:
ww_ = ww
src_img = img[hh_:(hh_ + args.h_len), ww_:(ww_ + args.w_len), :]
for short_size in img_short_side_len_list:
max_len = cfgs.IMG_MAX_LENGTH
if args.h_len < args.w_len:
new_h, new_w = short_size, min(int(short_size * float(args.w_len) / args.h_len), max_len)
else:
new_h, new_w = min(int(short_size * float(args.h_len) / args.w_len), max_len), short_size
img_resize = cv2.resize(src_img, (new_w, new_h))
resized_img, det_boxes_r_, det_scores_r_, det_category_r_, det_angle_logits_ = \
sess.run(
[img_batch, detection_boxes_angle, detection_scores, detection_category, detection_boxes_angle_logits],
feed_dict={img_plac: img_resize[:, :, ::-1]}
)
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
src_h, src_w = src_img.shape[0], src_img.shape[1]
if len(det_boxes_r_) > 0:
det_boxes_r_ = forward_convert(det_boxes_r_, False)
det_boxes_r_[:, 0::2] *= (src_w / resized_w)
det_boxes_r_[:, 1::2] *= (src_h / resized_h)
# det_boxes_r_ = backward_convert(det_boxes_r_, False)
for ii in range(len(det_boxes_r_)):
box_rotate = det_boxes_r_[ii]
box_rotate[0::2] = box_rotate[0::2] + ww_
box_rotate[1::2] = box_rotate[1::2] + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_[ii])
score_res_rotate.append(det_scores_r_[ii])
logits_res_rotate.append(det_angle_logits_[ii])
box_res_rotate = np.array(box_res_rotate)
label_res_rotate = np.array(label_res_rotate)
score_res_rotate = np.array(score_res_rotate)
logits_res_rotate = np.array(logits_res_rotate)
box_res_rotate_ = []
label_res_rotate_ = []
score_res_rotate_ = []
logits_res_rotate_ = []
threshold = {'roundabout': 0.1, 'tennis-court': 0.3, 'swimming-pool': 0.1, 'storage-tank': 0.2,
'soccer-ball-field': 0.3, 'small-vehicle': 0.2, 'ship': 0.2, 'plane': 0.3,
'large-vehicle': 0.1, 'helicopter': 0.2, 'harbor': 0.0001, 'ground-track-field': 0.3,
'bridge': 0.0001, 'basketball-court': 0.3, 'baseball-diamond': 0.3}
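# Per-class rotated NMS: each DOTA category uses its own IoU threshold from the
# table above; CPU NMS is tried first, with the GPU implementation as fallback.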
for sub_class in range(1, cfgs.CLASS_NUM + 1):
index = np.where(label_res_rotate == sub_class)[0]
if len(index) == 0:
continue
tmp_boxes_r = box_res_rotate[index]
tmp_label_r = label_res_rotate[index]
tmp_score_r = score_res_rotate[index]
tmp_logits_r = logits_res_rotate[index]
tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)
try:
inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
scores=np.array(tmp_score_r),
iou_threshold=threshold[LABEL_NAME_MAP[sub_class]],
max_output_size=5000)
except Exception:
tmp_boxes_r_ = np.array(tmp_boxes_r_)
tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
tmp[:, 0:-1] = tmp_boxes_r_
tmp[:, -1] = np.array(tmp_score_r)
# Note: rotate_gpu_nms computes the IoU of two identical rectangles as 0,
# so add a tiny jitter below to separate exact duplicates before NMS
jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
float(threshold[LABEL_NAME_MAP[sub_class]]), 0)
box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
score_res_rotate_.extend(np.array(tmp_score_r)[inx])
label_res_rotate_.extend(np.array(tmp_label_r)[inx])
logits_res_rotate_.extend(np.array(tmp_logits_r)[inx])
result_dict = {'boxes': np.array(box_res_rotate_), 'scores': np.array(score_res_rotate_),
'labels': np.array(label_res_rotate_), 'logits': np.array(logits_res_rotate_),
'image_id': img_path}
result_queue.put_nowait(result_dict)
def test_dota(det_net, real_test_img_list, args, txt_name):
save_path = os.path.join('./test_dota', cfgs.VERSION)
nr_records = len(real_test_img_list)
pbar = tqdm(total=nr_records)
gpu_num = len(args.gpus.strip().split(','))
nr_image = math.ceil(nr_records / gpu_num)
result_queue = Queue(500)
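# One worker process per GPU; each pushes a per-image result dict into this
# shared queue, which the main process drains below while updating the progress bar.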
procs = []
for i, gpu_id in enumerate(args.gpus.strip().split(',')):
start = i * nr_image
end = min(start + nr_image, nr_records)
split_records = real_test_img_list[start:end]
proc = Process(target=worker, args=(int(gpu_id), split_records, det_net, args, result_queue))
print('process:%d, start:%d, end:%d' % (i, start, end))
proc.start()
procs.append(proc)
log_dir = './dcl_log/{}'.format(cfgs.VERSION)
tools.mkdir(log_dir)
fw_tsv = open(os.path.join(log_dir, 'dcl_meta.tsv'), 'w')
# fw_tsv.write("Label\n")
final_logits = []
for i in range(nr_records):
res = result_queue.get()
if args.show_box:
nake_name = res['image_id'].split('/')[-1]
tools.mkdir(os.path.join(save_path, 'dota_img_vis'))
draw_path = os.path.join(save_path, 'dota_img_vis', nake_name)
draw_img = np.array(cv2.imread(res['image_id']), np.float32)
detected_boxes = backward_convert(res['boxes'], with_label=False)
detected_indices = res['scores'] >= cfgs.VIS_SCORE
detected_scores = res['scores'][detected_indices]
detected_boxes = detected_boxes[detected_indices]
detected_categories = res['labels'][detected_indices]
final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(draw_img,
boxes=detected_boxes,
labels=detected_categories,
scores=detected_scores,
method=1,
head=np.ones_like(detected_scores) * -1,
is_csl=True,
in_graph=False)
cv2.imwrite(draw_path, final_detections)
else:
detected_indices = res['scores'] >= cfgs.VIS_SCORE
res['scores'] = res['scores'][detected_indices]
res['boxes'] = res['boxes'][detected_indices]
res['labels'] = res['labels'][detected_indices]
rboxes = backward_convert(res['boxes'], with_label=False)
rboxes = coordinate_present_convert(rboxes, -1, False)
rlogits = res['logits'][detected_indices]
for ii, rb in enumerate(rboxes):
fw_tsv.write("%d\n" % (int(rb[-1])))
final_logits.append(rlogits[ii])
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
pbar.update(1)
for p in procs:
p.join()
fw_tsv.close()
final_logits = np.array(final_logits)
np.save(os.path.join(log_dir, "final_logits.npy"), final_logits)
def eval(num_imgs, args):
txt_name = '{}.txt'.format(cfgs.VERSION)
if not args.show_box:
if not os.path.exists(txt_name):
fw = open(txt_name, 'w')
fw.close()
fr = open(txt_name, 'r')
img_filter = fr.readlines()
print('****************************'*3)
print('Already tested imgs:', img_filter)
print('****************************'*3)
fr.close()
test_imgname_list = [os.path.join(args.test_dir, img_name) for img_name in os.listdir(args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
(img_name + '\n' not in img_filter)]
else:
test_imgname_list = [os.path.join(args.test_dir, img_name) for img_name in os.listdir(args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
assert len(test_imgname_list) != 0, 'test_dir has no images.' \
' Note that only the .jpg, .jpeg, .png, .tif and .tiff formats are supported.'
if num_imgs == np.inf:
real_test_img_list = test_imgname_list
else:
real_test_img_list = test_imgname_list[: num_imgs]
dcl = build_whole_network_dcl_tsne.DetectionNetwork(base_network_name=cfgs.NET_NAME,
is_training=False)
test_dota(det_net=dcl, real_test_img_list=real_test_img_list, args=args, txt_name=txt_name)
if not args.show_box:
os.remove(txt_name)
def parse_args():
parser = argparse.ArgumentParser('evaluate the result with the Pascal VOC 2007 standard')
parser.add_argument('--test_dir', dest='test_dir',
help='directory of test images',
default='/data/DOTA/test/images/', type=str)
parser.add_argument('--gpus', dest='gpus',
help='gpu id',
default='0,1,2,3,4,5,6,7', type=str)
parser.add_argument('--eval_num', dest='eval_num',
help='number of images to evaluate',
default=np.inf, type=int)
parser.add_argument('--show_box', '-s', default=False,
action='store_true')
parser.add_argument('--multi_scale', '-ms', default=False,
action='store_true')
parser.add_argument('--h_len', dest='h_len',
help='image height',
default=600, type=int)
parser.add_argument('--w_len', dest='w_len',
help='image width',
default=600, type=int)
parser.add_argument('--h_overlap', dest='h_overlap',
help='height overlap',
default=150, type=int)
parser.add_argument('--w_overlap', dest='w_overlap',
help='width overlap',
default=150, type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print(20*"--")
print(args)
print(20*"--")
eval(args.eval_num,
args=args)
|
url_loader_pipes.py
|
#!/usr/bin/env python
from multiprocessing import Process, Pipe
import urllib.request
def load_url(url, pipe):
url_handle = urllib.request.urlopen(url)
url_data = url_handle.read()
# The data returned by read() call is in the bytearray format. We need to
# decode the data before we can print it.
html_data = url_data.decode('utf-8')
url_handle.close()
pipe.send(html_data)
if __name__ == '__main__':
url = 'http://www.w3c.org'
parent_pipe, child_pipe = Pipe()
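# Pipe() returns two connected Connection objects: the child end is handed to the
# worker process, and the parent end is used here to receive the downloaded HTML.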
loader_process = Process(target=load_url, args=(url, child_pipe))
print("Spawning a new process to load the url")
loader_process.start()
print("Waiting for the spawned process to exit")
html_data = parent_pipe.recv()
print(html_data)
loader_process.join()
print("Exiting...")
|
backend.py
|
# -*- coding: utf-8 -*-
import ast
import builtins
import copy
import functools
import importlib
import inspect
import io
import logging
import os.path
import pkgutil
import pydoc
import re
import signal
import site
import subprocess
import sys
import tokenize
import traceback
import types
import warnings
from collections import namedtuple
from importlib.machinery import PathFinder, SourceFileLoader
from threading import Thread
import __main__ # @UnresolvedImport
import _ast
import thonny
from thonny.common import (
BackendEvent,
DebuggerCommand,
DebuggerResponse,
FrameInfo,
InlineCommand,
InlineResponse,
InputSubmission,
TextRange,
ToplevelCommand,
ToplevelResponse,
UserError,
ValueInfo,
parse_message,
path_startswith,
range_contains_smaller,
range_contains_smaller_or_equal,
serialize_message,
get_exe_dirs,
get_augmented_system_path,
update_system_path,
)
import queue
BEFORE_STATEMENT_MARKER = "_thonny_hidden_before_stmt"
BEFORE_EXPRESSION_MARKER = "_thonny_hidden_before_expr"
AFTER_STATEMENT_MARKER = "_thonny_hidden_after_stmt"
AFTER_EXPRESSION_MARKER = "_thonny_hidden_after_expr"
logger = logging.getLogger("thonny.backend")
_CONFIG_FILENAME = os.path.join(thonny.THONNY_USER_DIR, "backend_configuration.ini")
TempFrameInfo = namedtuple(
"TempFrameInfo",
[
"system_frame",
"locals",
"globals",
"event",
"focus",
"node_tags",
"current_statement",
"current_root_expression",
"current_evaluations",
],
)
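# TempFrameInfo keeps a reference to the live system frame plus exported
# locals/globals and debugger focus data; it is converted to FrameInfo before
# being sent to the frontend (see NiceTracer._report_state).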
_vm = None
class VM:
def __init__(self):
global _vm
_vm = self
self._ini = None
self._command_handlers = {}
self._object_info_tweakers = []
self._import_handlers = {}
self._input_queue = queue.Queue()
self._source_preprocessors = []
self._ast_postprocessors = []
self._main_dir = os.path.dirname(sys.modules["thonny"].__file__)
self._heap = (
{}
) # WeakValueDictionary would be better, but can't store reference to None
self._source_info_by_frame = {}
site.sethelper() # otherwise help function is not available
pydoc.pager = pydoc.plainpager # otherwise help command plays tricks
self._install_fake_streams()
self._current_executor = None
self._io_level = 0
init_msg = self._fetch_command()
original_argv = sys.argv.copy()
original_path = sys.path.copy()
# clean up path
sys.path = [d for d in sys.path if d != ""]
# start in shell mode
sys.argv[:] = [""] # empty "script name"
sys.path.insert(0, "") # current dir
# clean __main__ global scope
for key in list(__main__.__dict__.keys()):
if not key.startswith("__") or key in {"__file__", "__cached__"}:
del __main__.__dict__[key]
# unset __doc__ so that exec can write the executed script's docstring there
__main__.__doc__ = None
self._frontend_sys_path = init_msg["frontend_sys_path"]
self._load_shared_modules()
self._load_plugins()
self.send_message(
ToplevelResponse(
main_dir=self._main_dir,
original_argv=original_argv,
original_path=original_path,
argv=sys.argv,
path=sys.path,
usersitepackages=site.getusersitepackages()
if site.ENABLE_USER_SITE
else None,
prefix=sys.prefix,
welcome_text="Python " + _get_python_version_string(),
executable=sys.executable,
exe_dirs=get_exe_dirs(),
in_venv=(
hasattr(sys, "base_prefix")
and sys.base_prefix != sys.prefix
or hasattr(sys, "real_prefix")
and getattr(sys, "real_prefix") != sys.prefix
),
python_version=_get_python_version_string(),
cwd=os.getcwd(),
)
)
self._install_signal_handler()
def mainloop(self):
try:
while True:
try:
cmd = self._fetch_command()
if isinstance(cmd, InputSubmission):
self._input_queue.put(cmd)
elif isinstance(cmd, ToplevelCommand):
self._source_info_by_frame = {}
self._input_queue = queue.Queue()
self.handle_command(cmd)
else:
self.handle_command(cmd)
except KeyboardInterrupt:
logger.exception("Interrupt in mainloop")
# Interrupt must always result in waiting_toplevel_command state
# Don't show error messages, as the interrupted command may have been InlineCommand
# (handlers of ToplevelCommands in normal cases catch the interrupt and provide
# relevant message)
self.send_message(ToplevelResponse())
except Exception:
logger.exception("Crash in mainloop")
traceback.print_exc()
def add_command(self, command_name, handler):
"""Handler should be 1-argument function taking command object.
Handler may return None (in this case no response is sent to frontend)
or a BackendResponse
"""
self._command_handlers[command_name] = handler
def add_object_info_tweaker(self, tweaker):
"""Tweaker should be 2-argument function taking value and export record"""
self._object_info_tweakers.append(tweaker)
def add_import_handler(self, module_name, handler):
if module_name not in self._import_handlers:
self._import_handlers[module_name] = []
self._import_handlers[module_name].append(handler)
def add_source_preprocessor(self, func):
self._source_preprocessors.append(func)
def add_ast_postprocessor(self, func):
self._ast_postprocessors.append(func)
def get_main_module(self):
return __main__
def handle_command(self, cmd):
assert isinstance(cmd, (ToplevelCommand, InlineCommand))
def create_error_response(**kw):
if isinstance(cmd, ToplevelCommand):
return ToplevelResponse(command_name=cmd.name, **kw)
else:
return InlineResponse(command_name=cmd.name, **kw)
if cmd.name in self._command_handlers:
handler = self._command_handlers[cmd.name]
else:
handler = getattr(self, "_cmd_" + cmd.name, None)
if handler is None:
response = create_error_response(error="Unknown command: " + cmd.name)
else:
try:
response = handler(cmd)
except SystemExit:
# Must be caused by Thonny or plugins code
if isinstance(cmd, ToplevelCommand):
traceback.print_exc()
response = create_error_response(SystemExit=True)
except UserError as e:
sys.stderr.write(str(e) + "\n")
response = create_error_response()
except KeyboardInterrupt:
response = create_error_response(
user_exception=self._prepare_user_exception()
)
except Exception:
_report_internal_error()
response = create_error_response(
context_info="other unhandled exception"
)
if response is False:
# Command doesn't want to send any response
return
if response is None and isinstance(cmd, ToplevelCommand):
# create simple default response
response = ToplevelResponse(command_name=cmd.name)
# TODO: add these in response creation time in a helper function
if isinstance(response, ToplevelResponse):
response["gui_is_active"] = (
self._get_tkinter_default_root() is not None
or self._get_qt_app() is not None
)
self.send_message(response)
def get_option(self, name, default=None):
section, subname = self._parse_option_name(name)
val = self._get_ini().get(section, subname, fallback=default)
try:
return ast.literal_eval(val)
except Exception:
return val
def set_option(self, name, value):
ini = self._get_ini()
section, subname = self._parse_option_name(name)
if not ini.has_section(section):
ini.add_section(section)
if not isinstance(value, str):
value = repr(value)
ini.set(section, subname, value)
self.save_settings()
def switch_env_to_script_mode(self, cmd):
if "" in sys.path:
sys.path.remove("") # current directory
filename = cmd.args[0]
if os.path.isfile(filename):
sys.path.insert(0, os.path.abspath(os.path.dirname(filename)))
__main__.__dict__["__file__"] = filename
def _parse_option_name(self, name):
if "." in name:
return name.split(".", 1)
else:
return "general", name
def _get_ini(self):
if self._ini is None:
import configparser
self._ini = configparser.ConfigParser(interpolation=None)
self._ini.read(_CONFIG_FILENAME)
return self._ini
def save_settings(self):
if self._ini is None:
return
with open(_CONFIG_FILENAME, "w") as fp:
self._ini.write(fp)
def _custom_import(self, *args, **kw):
module = self._original_import(*args, **kw)
if not hasattr(module, "__name__"):
return module
# module specific handlers
for handler in self._import_handlers.get(module.__name__, []):
try:
handler(module)
except Exception:
_report_internal_error()
# general handlers
for handler in self._import_handlers.get("*", []):
try:
handler(module)
except Exception:
_report_internal_error()
return module
def _load_shared_modules(self):
self.load_modules_with_frontend_path(
["parso", "jedi", "thonnycontrib", "six", "asttokens"]
)
def load_modules_with_frontend_path(self, names):
from importlib import import_module
original_sys_path = sys.path
try:
sys.path = sys.path + self._frontend_sys_path
for name in names:
try:
import_module(name)
except ImportError:
pass
finally:
sys.path = original_sys_path
def _load_plugins(self):
# built-in plugins
import thonny.plugins.backend # pylint: disable=redefined-outer-name
self._load_plugins_from_path(
thonny.plugins.backend.__path__, "thonny.plugins.backend."
)
# 3rd party plugins from namespace package
try:
import thonnycontrib.backend # @UnresolvedImport
except ImportError:
# No 3rd party plugins installed
pass
else:
self._load_plugins_from_path(
thonnycontrib.backend.__path__, "thonnycontrib.backend."
)
def _load_plugins_from_path(self, path, prefix):
load_function_name = "load_plugin"
for _, module_name, _ in sorted(
pkgutil.iter_modules(path, prefix), key=lambda x: x[1]
):
try:
m = importlib.import_module(module_name)
if hasattr(m, load_function_name):
f = getattr(m, load_function_name)
sig = inspect.signature(f)
if len(sig.parameters) == 0:
f()
else:
f(self)
except Exception:
logger.exception("Failed loading plugin '" + module_name + "'")
def _install_signal_handler(self):
def signal_handler(signal_, frame):
raise KeyboardInterrupt("Execution interrupted")
if os.name == "nt":
signal.signal(signal.SIGBREAK, signal_handler)
else:
signal.signal(signal.SIGINT, signal_handler)
def _cmd_cd(self, cmd):
if len(cmd.args) == 1:
path = cmd.args[0]
try:
os.chdir(path)
return ToplevelResponse()
except FileNotFoundError:
raise UserError("No such folder: " + path)
else:
raise UserError("cd takes one parameter")
def _cmd_Run(self, cmd):
self.switch_env_to_script_mode(cmd)
return self._execute_file(cmd, SimpleRunner)
def _cmd_run(self, cmd):
return self._execute_file(cmd, SimpleRunner)
def _cmd_FastDebug(self, cmd):
self.switch_env_to_script_mode(cmd)
return self._execute_file(cmd, FastTracer)
def _cmd_Debug(self, cmd):
self.switch_env_to_script_mode(cmd)
return self._execute_file(cmd, NiceTracer)
def _cmd_debug(self, cmd):
return self._execute_file(cmd, NiceTracer)
def _cmd_execute_source(self, cmd):
"""Executes Python source entered into shell"""
filename = "<pyshell>"
ws_stripped_source = cmd.source.strip()
source = ws_stripped_source.strip("?")
num_stripped_question_marks = len(ws_stripped_source) - len(source)
# let's see if it's single expression or something more complex
try:
root = ast.parse(source, filename=filename, mode="exec")
except SyntaxError as e:
error = "".join(traceback.format_exception_only(type(e), e))
sys.stderr.write(error)
return ToplevelResponse()
assert isinstance(root, ast.Module)
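# Pick the compilation mode: a single expression is eval'ed so its value can be
# shown in the shell; a block ending with an expression is split into exec+eval;
# anything else is plain exec.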
if len(root.body) == 1 and isinstance(root.body[0], ast.Expr):
mode = "eval"
elif len(root.body) > 1 and isinstance(root.body[-1], ast.Expr):
mode = "exec+eval"
else:
mode = "exec"
result_attributes = self._execute_source(
source,
filename,
mode,
NiceTracer if getattr(cmd, "debug_mode", False) else SimpleRunner,
cmd,
)
result_attributes["num_stripped_question_marks"] = num_stripped_question_marks
return ToplevelResponse(command_name="execute_source", **result_attributes)
def _cmd_execute_system_command(self, cmd):
env = dict(os.environ).copy()
encoding = "utf-8"
env["PYTHONIOENCODING"] = encoding
# Make sure this python interpreter and its scripts are available
# in PATH
update_system_path(env, get_augmented_system_path(get_exe_dirs()))
popen_kw = dict(
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
env=env,
universal_newlines=True,
)
if sys.version_info >= (3, 6):
popen_kw["errors"] = "replace"
popen_kw["encoding"] = encoding
assert cmd.cmd_line.startswith("!")
cmd_line = cmd.cmd_line[1:]
proc = subprocess.Popen(cmd_line, **popen_kw)
def copy_stream(source, target):
while True:
c = source.readline()
if c == "":
break
else:
target.write(c)
copy_out = Thread(
target=lambda: copy_stream(proc.stdout, sys.stdout), daemon=True
)
copy_err = Thread(
target=lambda: copy_stream(proc.stderr, sys.stderr), daemon=True
)
copy_out.start()
copy_err.start()
try:
proc.wait()
except KeyboardInterrupt as e:
print(str(e), file=sys.stderr)
copy_out.join()
copy_err.join()
def _cmd_process_gui_events(self, cmd):
# advance the event loop
try:
# First try Tkinter:
root = self._get_tkinter_default_root()
if root is not None:
import tkinter
# http://bugs.python.org/issue989712
# http://bugs.python.org/file6090/run.py.diff
while root.dooneevent(tkinter._tkinter.DONT_WAIT):
pass
else:
# Try Qt only when Tkinter is not used
app = self._get_qt_app()
if app is not None:
app.processEvents()
except Exception:
pass
return False
def _cmd_get_globals(self, cmd):
warnings.warn("_cmd_get_globals is deprecated for CPython")
try:
return InlineResponse(
"get_globals",
module_name=cmd.module_name,
globals=self.export_globals(cmd.module_name),
)
except Exception as e:
return InlineResponse(
"get_globals", module_name=cmd.module_name, error=str(e)
)
def _cmd_get_frame_info(self, cmd):
atts = {}
try:
# TODO: make it work also in past states
frame, location = self._lookup_frame_by_id(cmd["frame_id"])
if frame is None:
atts["error"] = "Frame not found"
else:
atts["code_name"] = frame.f_code.co_name
atts["module_name"] = frame.f_globals["__name__"]
atts["locals"] = (
None
if frame.f_locals is frame.f_globals
else self.export_variables(frame.f_locals)
)
atts["globals"] = self.export_variables(frame.f_globals)
atts["freevars"] = frame.f_code.co_freevars
atts["location"] = location
except Exception as e:
atts["error"] = str(e)
return InlineResponse("get_frame_info", frame_id=cmd.frame_id, **atts)
def _cmd_get_active_distributions(self, cmd):
try:
# if it is called after first installation to user site packages
# this dir is not yet in sys.path
if (
site.ENABLE_USER_SITE
and site.getusersitepackages()
and os.path.exists(site.getusersitepackages())
and site.getusersitepackages() not in sys.path
):
# insert before first site packages item
for i, item in enumerate(sys.path):
if "site-packages" in item or "dist-packages" in item:
sys.path.insert(i, site.getusersitepackages())
break
else:
sys.path.append(site.getusersitepackages())
import pkg_resources
pkg_resources._initialize_master_working_set()
dists = {
dist.key: {
"project_name": dist.project_name,
"key": dist.key,
"location": dist.location,
"version": dist.version,
}
for dist in pkg_resources.working_set
} # pylint: disable=not-an-iterable
return InlineResponse(
"get_active_distributions",
distributions=dists,
usersitepackages=site.getusersitepackages()
if site.ENABLE_USER_SITE
else None,
)
except Exception:
return InlineResponse(
"get_active_distributions", error=traceback.format_exc()
)
def _cmd_get_locals(self, cmd):
for frame in inspect.stack():
if id(frame) == cmd.frame_id:
return InlineResponse(
"get_locals", locals=self.export_variables(frame.f_locals)
)
raise RuntimeError("Frame '{0}' not found".format(cmd.frame_id))
def _cmd_get_heap(self, cmd):
result = {}
for key in self._heap:
result[key] = self.export_value(self._heap[key])
return InlineResponse("get_heap", heap=result)
def _cmd_shell_autocomplete(self, cmd):
error = None
try:
import jedi
except ImportError:
completions = []
error = "Could not import jedi"
else:
try:
# with warnings.catch_warnings():
interpreter = jedi.Interpreter(cmd.source, [__main__.__dict__])
completions = self._export_completions(interpreter.completions())
except Exception as e:
completions = []
error = "Autocomplete error: " + str(e)
return InlineResponse(
"shell_autocomplete",
source=cmd.source,
completions=completions,
error=error,
)
def _cmd_editor_autocomplete(self, cmd):
error = None
try:
import jedi
self._debug(jedi.__file__, sys.path)
with warnings.catch_warnings():
script = jedi.Script(cmd.source, cmd.row, cmd.column, cmd.filename)
completions = self._export_completions(script.completions())
except ImportError:
completions = []
error = "Could not import jedi"
except Exception as e:
completions = []
error = "Autocomplete error: " + str(e)
return InlineResponse(
"editor_autocomplete",
source=cmd.source,
row=cmd.row,
column=cmd.column,
filename=cmd.filename,
completions=completions,
error=error,
)
def _cmd_Reset(self, cmd):
if len(cmd.args) == 0:
# nothing to do, because Reset always happens in fresh process
return ToplevelResponse(
command_name="Reset",
welcome_text="Python " + _get_python_version_string(),
executable=sys.executable,
)
else:
raise UserError("Command 'Reset' doesn't take arguments")
def _export_completions(self, jedi_completions):
result = []
for c in jedi_completions:
if not c.name.startswith("__"):
record = {
"name": c.name,
"complete": c.complete,
"type": c.type,
"description": c.description,
}
""" TODO:
try:
if c.type in ["class", "module", "function"]:
if c.type == "function":
record["docstring"] = c.docstring()
else:
record["docstring"] = c.description + "\n" + c.docstring()
except Exception:
pass
"""
result.append(record)
return result
def _cmd_get_object_info(self, cmd):
if (
isinstance(self._current_executor, NiceTracer)
and self._current_executor.is_in_past()
):
info = {"id": cmd.object_id, "error": "past info not available"}
elif cmd.object_id in self._heap:
value = self._heap[cmd.object_id]
attributes = {}
if cmd.include_attributes:
for name in dir(value):
if not name.startswith("__") or cmd.all_attributes:
# attributes[name] = inspect.getattr_static(value, name)
try:
attributes[name] = getattr(value, name)
except Exception:
pass
self._heap[id(type(value))] = type(value)
info = {
"id": cmd.object_id,
"repr": repr(value),
"type": str(type(value)),
"full_type_name": str(type(value))
.replace("<class '", "")
.replace("'>", "")
.strip(),
"type_id": id(type(value)),
"attributes": self.export_variables(attributes),
}
if isinstance(value, io.TextIOWrapper):
self._add_file_handler_info(value, info)
elif isinstance(
value,
(
types.BuiltinFunctionType,
types.BuiltinMethodType,
types.FunctionType,
types.LambdaType,
types.MethodType,
),
):
self._add_function_info(value, info)
elif isinstance(value, (list, tuple, set)):
self._add_elements_info(value, info)
elif isinstance(value, dict):
self._add_entries_info(value, info)
elif hasattr(value, "image_data"):
info["image_data"] = value.image_data
for tweaker in self._object_info_tweakers:
try:
tweaker(value, info, cmd)
except Exception:
logger.exception("Failed object info tweaker: " + str(tweaker))
else:
info = {"id": cmd.object_id, "error": "object info not available"}
return InlineResponse("get_object_info", id=cmd.object_id, info=info)
def _get_tkinter_default_root(self):
# tkinter._default_root is not None when a window has been created
# and mainloop either hasn't been called yet or hasn't ended yet
tkinter = sys.modules.get("tkinter")
if tkinter is not None:
return getattr(tkinter, "_default_root", None)
else:
return None
def _get_qt_app(self):
mod = sys.modules.get("PyQt5.QtCore")
if mod is None:
mod = sys.modules.get("PyQt4.QtCore")
if mod is None:
mod = sys.modules.get("PySide.QtCore")
if mod is None:
return None
app_class = getattr(mod, "QCoreApplication", None)
if app_class is not None:
try:
return app_class.instance()
except Exception:
return None
else:
return None
def _add_file_handler_info(self, value, info):
try:
assert isinstance(value.name, str)
assert value.mode in ("r", "rt", "tr", "br", "rb")
assert value.errors in ("strict", None)
assert value.newlines is None or value.tell() > 0
# TODO: cache the content
# TODO: don't read too big files
with open(value.name, encoding=value.encoding) as f:
info["file_encoding"] = f.encoding
info["file_content"] = f.read()
info["file_tell"] = value.tell()
except Exception as e:
info["file_error"] = "Could not get file content, error:" + str(e)
def _add_function_info(self, value, info):
try:
info["source"] = inspect.getsource(value)
except Exception:
pass
def _add_elements_info(self, value, info):
info["elements"] = []
for element in value:
info["elements"].append(self.export_value(element))
def _add_entries_info(self, value, info):
info["entries"] = []
for key in value:
info["entries"].append(
(self.export_value(key), self.export_value(value[key]))
)
def _execute_file(self, cmd, executor_class):
# args are accepted only in Run and Debug,
# and were stored in sys.argv already in VM.__init__
# TODO: are they?
if len(cmd.args) >= 1:
sys.argv = cmd.args
filename = cmd.args[0]
if os.path.isabs(filename):
full_filename = filename
else:
full_filename = os.path.abspath(filename)
with tokenize.open(full_filename) as fp:
source = fp.read()
for preproc in self._source_preprocessors:
source = preproc(source, cmd)
result_attributes = self._execute_source(
source,
full_filename,
"exec",
executor_class,
cmd,
self._ast_postprocessors,
)
result_attributes["filename"] = full_filename
return ToplevelResponse(command_name=cmd.name, **result_attributes)
else:
raise UserError("Command '%s' takes at least one argument" % cmd.name)
def _execute_source(
self,
source,
filename,
execution_mode,
executor_class,
cmd,
ast_postprocessors=[],
):
self._current_executor = executor_class(self, cmd)
try:
return self._current_executor.execute_source(
source, filename, execution_mode, ast_postprocessors
)
finally:
self._current_executor = None
def _install_fake_streams(self):
self._original_stdin = sys.stdin
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
# yes, both out and err will be directed to out (but with different tags);
# this allows the client to see the order of interleaved writes to stdout/stderr
sys.stdin = VM.FakeInputStream(self, sys.stdin)
sys.stdout = VM.FakeOutputStream(self, sys.stdout, "stdout")
sys.stderr = VM.FakeOutputStream(self, sys.stdout, "stderr")
# fake it properly: replace also "backup" streams
sys.__stdin__ = sys.stdin
sys.__stdout__ = sys.stdout
sys.__stderr__ = sys.stderr
def _install_custom_import(self):
self._original_import = builtins.__import__
builtins.__import__ = self._custom_import
def _restore_original_import(self):
builtins.__import__ = self._original_import
def _fetch_command(self):
line = self._original_stdin.readline()
if line == "":
logger.info("Read stdin EOF")
sys.exit()
cmd = parse_message(line)
return cmd
def send_message(self, msg):
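# All messages to the frontend go through the original stdout; cwd is always
# attached, and ToplevelResponses additionally get the exported __main__ globals.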
if "cwd" not in msg:
msg["cwd"] = os.getcwd()
if isinstance(msg, ToplevelResponse) and "globals" not in msg:
msg["globals"] = self.export_globals()
self._original_stdout.write(serialize_message(msg) + "\n")
self._original_stdout.flush()
def export_value(self, value, max_repr_length=5000):
self._heap[id(value)] = value
try:
rep = repr(value)
except Exception:
# See https://bitbucket.org/plas/thonny/issues/584/problem-with-thonnys-back-end-obj-no
rep = "??? <repr error>"
if len(rep) > max_repr_length:
rep = rep[:max_repr_length] + "…"
return ValueInfo(id(value), rep)
def export_variables(self, variables):
result = {}
for name in variables:
if not name.startswith("__"):
result[name] = self.export_value(variables[name], 100)
return result
def export_globals(self, module_name="__main__"):
if module_name in sys.modules:
return self.export_variables(sys.modules[module_name].__dict__)
else:
raise RuntimeError("Module '{0}' is not loaded".format(module_name))
def _debug(self, *args):
logger.debug("VM: " + str(args))
def _enter_io_function(self):
self._io_level += 1
def _exit_io_function(self):
self._io_level -= 1
def is_doing_io(self):
return self._io_level > 0
def _export_stack(self, newest_frame, skip_checker=None):
result = []
system_frame = newest_frame
while system_frame is not None:
module_name = system_frame.f_globals["__name__"]
code_name = system_frame.f_code.co_name
if not skip_checker or not skip_checker(system_frame):
source, firstlineno, in_library = self._get_frame_source_info(
system_frame
)
result.insert(
0,
FrameInfo(
# TODO: can this id be reused by a later frame?
# Need to store the reference to avoid GC?
# I guess it is not required, as id will be required
# only for stacktrace inspection, and sys.last_exception
# will have the reference anyway
# (NiceTracer has its own reference keeping)
id=id(system_frame),
filename=system_frame.f_code.co_filename,
module_name=module_name,
code_name=code_name,
locals=self.export_variables(system_frame.f_locals),
globals=self.export_variables(system_frame.f_globals),
freevars=system_frame.f_code.co_freevars,
source=source,
lineno=system_frame.f_lineno,
firstlineno=firstlineno,
in_library=in_library,
event="line",
focus=TextRange(
system_frame.f_lineno, 0, system_frame.f_lineno + 1, 0
),
node_tags=None,
current_statement=None,
current_evaluations=None,
current_root_expression=None,
),
)
if module_name == "__main__" and code_name == "<module>":
# this was last frame relevant to the user
break
system_frame = system_frame.f_back
return result
def _lookup_frame_by_id(self, frame_id):
def lookup_from_stack(frame):
if frame is None:
return None
elif id(frame) == frame_id:
return frame
else:
return lookup_from_stack(frame.f_back)
def lookup_from_tb(entry):
if entry is None:
return None
elif id(entry.tb_frame) == frame_id:
return entry.tb_frame
else:
return lookup_from_tb(entry.tb_next)
result = lookup_from_stack(inspect.currentframe())
if result is not None:
return result, "stack"
if getattr(sys, "last_traceback"):
result = lookup_from_tb(getattr(sys, "last_traceback"))
if result:
return result, "last_traceback"
_, _, tb = sys.exc_info()
return lookup_from_tb(tb), "current_exception"
def _get_frame_source_info(self, frame):
fid = id(frame)
if fid not in self._source_info_by_frame:
self._source_info_by_frame[fid] = _fetch_frame_source_info(frame)
return self._source_info_by_frame[fid]
def _prepare_user_exception(self):
e_type, e_value, e_traceback = sys.exc_info()
sys.last_type, sys.last_value, sys.last_traceback = (
e_type,
e_value,
e_traceback,
)
processed_tb = traceback.extract_tb(e_traceback)
tb = e_traceback
while tb.tb_next is not None:
tb = tb.tb_next
last_frame = tb.tb_frame
if e_type is SyntaxError:
# Don't show ast frame
while (
last_frame.f_code.co_filename
and last_frame.f_code.co_filename == ast.__file__
):
last_frame = last_frame.f_back
if e_type is SyntaxError:
msg = (
traceback.format_exception_only(e_type, e_value)[-1]
.replace(e_type.__name__ + ":", "")
.strip()
)
else:
msg = str(e_value)
return {
"type_name": e_type.__name__,
"message": msg,
"stack": self._export_stack(last_frame),
"items": format_exception_with_frame_info(e_type, e_value, e_traceback),
"filename": getattr(e_value, "filename", processed_tb[-1].filename),
"lineno": getattr(e_value, "lineno", processed_tb[-1].lineno),
"col_offset": getattr(e_value, "offset", None),
"line": getattr(e_value, "text", processed_tb[-1].line),
}
class FakeStream:
def __init__(self, vm, target_stream):
self._vm = vm
self._target_stream = target_stream
self._processed_symbol_count = 0
def isatty(self):
return True
def __getattr__(self, name):
# TODO: is it safe to perform those other functions without notifying vm
# via _enter_io_function?
return getattr(self._target_stream, name)
class FakeOutputStream(FakeStream):
def __init__(self, vm, target_stream, stream_name):
VM.FakeStream.__init__(self, vm, target_stream)
self._stream_name = stream_name
def write(self, data):
try:
self._vm._enter_io_function()
# click may send bytes instead of strings
if isinstance(data, bytes):
data = data.decode(errors="replace")
if data != "":
self._vm.send_message(
BackendEvent(
"ProgramOutput", stream_name=self._stream_name, data=data
)
)
self._processed_symbol_count += len(data)
finally:
self._vm._exit_io_function()
def writelines(self, lines):
try:
self._vm._enter_io_function()
self.write("".join(lines))
finally:
self._vm._exit_io_function()
class FakeInputStream(FakeStream):
def _generic_read(self, method, limit=-1):
# is there some queued input?
if not self._vm._input_queue.empty():
cmd = self._vm._input_queue.get()
self._processed_symbol_count += len(cmd.data)
return cmd.data
# new input needs to be requested
try:
self._vm._enter_io_function()
self._vm.send_message(
BackendEvent("InputRequest", method=method, limit=limit)
)
while True:
cmd = self._vm._fetch_command()
if isinstance(cmd, InputSubmission):
self._processed_symbol_count += len(cmd.data)
return cmd.data
elif isinstance(cmd, InlineCommand):
self._vm.handle_command(cmd)
else:
raise RuntimeError(
"Wrong type of command when waiting for input"
)
finally:
self._vm._exit_io_function()
def read(self, limit=-1):
return self._generic_read("read", limit)
def readline(self, limit=-1):
return self._generic_read("readline", limit)
def readlines(self, limit=-1):
return self._generic_read("readlines", limit)
def __next__(self):
return self.readline()
def __iter__(self):
return self
def prepare_hooks(method):
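# Decorator: for the duration of the wrapped call, register the executor as a
# sys.meta_path finder and install the VM's custom __import__ hook.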
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
try:
sys.meta_path.insert(0, self)
self._vm._install_custom_import()
return method(self, *args, **kwargs)
finally:
del sys.meta_path[0]
if hasattr(self._vm, "_original_import"):
self._vm._restore_original_import()
return wrapper
def return_execution_result(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
try:
result = method(self, *args, **kwargs)
if result is not None:
return result
return {"context_info": "after normal execution"}
except Exception:
return {"user_exception": self._vm._prepare_user_exception()}
return wrapper
class Executor:
def __init__(self, vm, original_cmd):
self._vm = vm
self._original_cmd = original_cmd
self._main_module_path = None
def execute_source(self, source, filename, mode, ast_postprocessors):
if isinstance(source, str):
# TODO: simplify this or make sure encoding is correct
source = source.encode("utf-8")
if os.path.exists(filename):
self._main_module_path = filename
global_vars = __main__.__dict__
statements = expression = None
try:
if mode == "exec+eval":
assert not ast_postprocessors
# Useful in shell to get last expression value in multi-statement block
root = self._prepare_ast(source, filename, "exec")
statements = compile(ast.Module(body=root.body[:-1]), filename, "exec")
expression = compile(
ast.Expression(root.body[-1].value), filename, "eval"
)
else:
root = self._prepare_ast(source, filename, mode)
if mode == "eval":
assert not ast_postprocessors
expression = compile(root, filename, mode)
elif mode == "exec":
for func in ast_postprocessors:
func(root)
statements = compile(root, filename, mode)
else:
raise ValueError("Unknown mode")
return self._execute_prepared_user_code(statements, expression, global_vars)
except SyntaxError:
return {"user_exception": self._vm._prepare_user_exception()}
except SystemExit:
return {"SystemExit": True}
except Exception:
_report_internal_error()
return {}
@return_execution_result
@prepare_hooks
def _execute_prepared_user_code(self, statements, expression, global_vars):
if statements:
exec(statements, global_vars)
if expression:
value = eval(expression, global_vars)
if value is not None:
builtins._ = value
return {"value_info": self._vm.export_value(value)}
def find_spec(self, fullname, path=None, target=None):
"""override in subclass for custom-loading user modules"""
return None
def _prepare_ast(self, source, filename, mode):
return ast.parse(source, filename, mode)
class SimpleRunner(Executor):
pass
class Tracer(Executor):
def __init__(self, vm, original_cmd):
super().__init__(vm, original_cmd)
self._thonny_src_dir = os.path.dirname(sys.modules["thonny"].__file__)
self._fresh_exception = None
# first (automatic) stepping command depends on whether any breakpoints were set or not
breakpoints = self._original_cmd.breakpoints
assert isinstance(breakpoints, dict)
if breakpoints:
command_name = "resume"
else:
command_name = "step_into"
self._current_command = DebuggerCommand(
command_name,
state=None,
focus=None,
frame_id=None,
exception=None,
breakpoints=breakpoints,
)
def _trace(self, frame, event, arg):
raise NotImplementedError()
def _frame_is_alive(self, frame_id):
raise NotImplementedError()
def _execute_prepared_user_code(self, statements, expression, global_vars):
try:
sys.settrace(self._trace)
if hasattr(sys, "breakpointhook"):
old_breakpointhook = sys.breakpointhook
sys.breakpointhook = self._breakpointhook
return super()._execute_prepared_user_code(
statements, expression, global_vars
)
finally:
sys.settrace(None)
if hasattr(sys, "breakpointhook"):
sys.breakpointhook = old_breakpointhook
def _should_skip_frame(self, frame):
# For some reason Pylint doesn't see inspect.CO_GENERATOR and such
# pylint: disable=no-member
code = frame.f_code
return (
code is None
or code.co_filename is None
or not self._is_interesting_module_file(code.co_filename)
or code.co_flags & inspect.CO_GENERATOR # @UndefinedVariable
or sys.version_info >= (3, 5)
and code.co_flags & inspect.CO_COROUTINE # @UndefinedVariable
or sys.version_info >= (3, 5)
and code.co_flags & inspect.CO_ITERABLE_COROUTINE # @UndefinedVariable
or sys.version_info >= (3, 6)
and code.co_flags & inspect.CO_ASYNC_GENERATOR # @UndefinedVariable
or "importlib._bootstrap" in code.co_filename
or self._vm.is_doing_io()
or path_startswith(code.co_filename, self._thonny_src_dir)
)
def _is_interesting_module_file(self, path):
# interesting files are files directly in current directory
# or under the same directory as main module
# or the ones with breakpoints
return (
path_startswith(path, os.getcwd())
or self._main_module_path is not None
and path_startswith(path, os.path.dirname(self._main_module_path))
or path in self._current_command["breakpoints"]
)
def _is_interesting_exception(self, frame):
# interested only in exceptions in command frame or its parent frames
return id(frame) == self._current_command[
"frame_id"
] or not self._frame_is_alive(self._current_command["frame_id"])
def _fetch_next_debugger_command(self):
while True:
cmd = self._vm._fetch_command()
if isinstance(cmd, InlineCommand):
self._vm.handle_command(cmd)
else:
assert isinstance(cmd, DebuggerCommand)
return cmd
def _register_affected_frame(self, exception_obj, frame):
if not hasattr(exception_obj, "_affected_frame_ids_"):
exception_obj._affected_frame_ids_ = set()
exception_obj._affected_frame_ids_.add(id(frame))
def _get_current_exception(self):
if self._fresh_exception is not None:
return self._fresh_exception
else:
return sys.exc_info()
def _export_exception_info(self):
exc = self._get_current_exception()
if exc[0] is None:
return {
"id": None,
"msg": None,
"type_name": None,
"lines_with_frame_info": None,
"affected_frame_ids": set(),
"is_fresh": False,
}
else:
return {
"id": id(exc[1]),
"msg": str(exc[1]),
"type_name": exc[0].__name__,
"lines_with_frame_info": format_exception_with_frame_info(*exc),
"affected_frame_ids": exc[1]._affected_frame_ids_,
"is_fresh": exc == self._fresh_exception,
}
def _get_breakpoints_with_cursor_position(self, cmd):
if cmd["cursor_position"] is None:
return cmd["breakpoints"]
else:
result = copy.copy(cmd["breakpoints"])
path, line = cmd["cursor_position"]
if path not in result:
result[path] = set()
result[path].add(line)
return result
def _breakpointhook(self, *args, **kw):
pass
class FastTracer(Tracer):
def __init__(self, vm, original_cmd):
super().__init__(vm, original_cmd)
self._alive_frame_ids = set()
def _breakpointhook(self, *args, **kw):
frame = inspect.currentframe()
while self._should_skip_frame(frame):
frame = frame.f_back
self._report_current_state(frame)
self._current_command = self._fetch_next_debugger_command()
def _trace(self, frame, event, arg):
# is this frame interesting at all?
if event == "call" and self._should_skip_frame(frame):
return None
if event == "call":
self._fresh_exception = None
# can we skip this frame?
if (
self._current_command.name == "step_over"
and not self._current_command.breakpoints
):
return None
else:
self._alive_frame_ids.add(id(frame))
elif event == "return":
self._fresh_exception = None
self._alive_frame_ids.remove(id(frame))
elif event == "exception":
self._fresh_exception = arg
self._register_affected_frame(arg[1], frame)
if self._is_interesting_exception(frame):
# UI doesn't know about separate exception events
self._report_current_state(frame)
self._current_command = self._fetch_next_debugger_command()
elif event == "line":
self._fresh_exception = None
handler = getattr(self, "_cmd_%s_completed" % self._current_command.name)
if handler(frame, self._current_command):
self._report_current_state(frame)
self._current_command = self._fetch_next_debugger_command()
else:
self._fresh_exception = None
return self._trace
def _report_current_state(self, frame):
msg = DebuggerResponse(
stack=self._vm._export_stack(frame, self._should_skip_frame),
in_present=True,
io_symbol_count=None,
exception_info=self._export_exception_info(),
tracer_class="FastTracer",
)
self._vm.send_message(msg)
def _cmd_step_into_completed(self, frame, cmd):
return True
def _cmd_step_over_completed(self, frame, cmd):
frame_id = id(frame)
return (
frame_id == cmd.frame_id
or cmd.frame_id not in self._alive_frame_ids
or self._at_a_breakpoint(frame, cmd)
)
def _cmd_step_out_completed(self, frame, cmd):
return cmd.frame_id not in self._alive_frame_ids or self._at_a_breakpoint(
frame, cmd
)
def _cmd_resume_completed(self, frame, cmd):
return self._at_a_breakpoint(frame, cmd)
def _cmd_run_to_cursor_completed(self, frame, cmd):
return self._at_a_breakpoint(
frame, cmd, self._get_breakpoints_with_cursor_position(cmd)
)
def _at_a_breakpoint(self, frame, cmd, breakpoints=None):
# TODO: try re-entering same line in loop
if breakpoints is None:
breakpoints = cmd.breakpoints
filename = frame.f_code.co_filename
return filename in breakpoints and frame.f_lineno in breakpoints[filename]
def _frame_is_alive(self, frame_id):
return frame_id in self._alive_frame_ids
class NiceTracer(Tracer):
def __init__(self, vm, original_cmd):
super().__init__(vm, original_cmd)
self._instrumented_files = set()
self._install_marker_functions()
self._custom_stack = []
self._saved_states = []
self._current_state_index = 0
from collections import Counter
self._fulltags = Counter()
self._nodes = {}
def _breakpointhook(self, *args, **kw):
self._report_state(len(self._saved_states) - 1)
self._current_command = self._fetch_next_debugger_command()
def _install_marker_functions(self):
# Make dummy marker functions universally available by putting them
# into builtin scope
self.marker_function_names = {
BEFORE_STATEMENT_MARKER,
AFTER_STATEMENT_MARKER,
BEFORE_EXPRESSION_MARKER,
AFTER_EXPRESSION_MARKER,
}
for name in self.marker_function_names:
if not hasattr(builtins, name):
setattr(builtins, name, getattr(self, name))
def _prepare_ast(self, source, filename, mode):
# ast_utils needs to be imported after asttokens
# has been (custom-)imported
from thonny import ast_utils
root = ast.parse(source, filename, mode)
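# Annotate every node with its end position (used for TextRange focus reporting)
# and instrument the tree with the before/after marker calls defined above.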
ast_utils.mark_text_ranges(root, source)
self._tag_nodes(root)
self._insert_expression_markers(root)
self._insert_statement_markers(root)
self._insert_for_target_markers(root)
self._instrumented_files.add(filename)
return root
def _should_skip_frame(self, frame):
code = frame.f_code
return (
# never skip marker functions
code.co_name not in self.marker_function_names
and (
super()._should_skip_frame(frame)
or code.co_filename not in self._instrumented_files
)
)
def find_spec(self, fullname, path=None, target=None):
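# Intercept imports of "interesting" user modules so their source is also
# instrumented (via FancySourceFileLoader) and can be traced like the main script.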
spec = PathFinder.find_spec(fullname, path, target)
if (
spec is not None
and isinstance(spec.loader, SourceFileLoader)
and getattr(spec, "origin", None)
and self._is_interesting_module_file(spec.origin)
):
spec.loader = FancySourceFileLoader(fullname, spec.origin, self)
return spec
else:
return super().find_spec(fullname, path, target)
def is_in_past(self):
return self._current_state_index < len(self._saved_states) - 1
def _trace(self, frame, event, arg):
try:
return self._trace_and_catch(frame, event, arg)
except BaseException:
traceback.print_exc()
sys.settrace(None)
return None
def _trace_and_catch(self, frame, event, arg):
"""
1) Detects marker calls and responds to client queries in these spots
2) Maintains a customized view of stack
"""
# frame skipping test should be done only in new frames
if event == "call" and self._should_skip_frame(frame):
return None
code_name = frame.f_code.co_name
if event == "call":
self._fresh_exception = (
None
) # some code is running, therefore exception is not fresh anymore
if code_name in self.marker_function_names:
# the main thing
if code_name == BEFORE_STATEMENT_MARKER:
event = "before_statement"
elif code_name == AFTER_STATEMENT_MARKER:
event = "after_statement"
elif code_name == BEFORE_EXPRESSION_MARKER:
event = "before_expression"
elif code_name == AFTER_EXPRESSION_MARKER:
event = "after_expression"
else:
raise AssertionError("Unknown marker function")
marker_function_args = frame.f_locals.copy()
node = self._nodes[marker_function_args["node_id"]]
del marker_function_args["self"]
if "call_function" not in node.tags:
self._handle_progress_event(
frame.f_back, event, marker_function_args, node
)
self._try_interpret_as_again_event(
frame.f_back, event, marker_function_args, node
)
else:
# Calls to proper functions.
# Client doesn't care about these events,
# it cares about "before_statement" events in the first statement of the body
self._custom_stack.append(CustomStackFrame(frame, "call"))
elif event == "exception":
self._fresh_exception = arg
self._register_affected_frame(arg[1], frame)
# use the state prepared by previous event
last_custom_frame = self._custom_stack[-1]
assert last_custom_frame.system_frame == frame
assert last_custom_frame.event.startswith("before_")
pseudo_event = last_custom_frame.event.replace("before_", "after_").replace(
"_again", ""
)
self._handle_progress_event(frame, pseudo_event, {}, last_custom_frame.node)
elif event == "return":
self._fresh_exception = None
if code_name not in self.marker_function_names:
self._custom_stack.pop()
if len(self._custom_stack) == 0:
# We popped last frame, this means our program has ended.
# There may be more events coming from upper (system) frames
# but we're not interested in those
sys.settrace(None)
else:
pass
else:
self._fresh_exception = None
return self._trace
def _handle_progress_event(self, frame, event, args, node):
self._save_current_state(frame, event, args, node)
self._respond_to_commands()
def _save_current_state(self, frame, event, args, node):
"""
Updates custom stack and stores the state
self._custom_stack always keeps last info,
which gets exported as FrameInfos to _saved_states["stack"]
"""
focus = TextRange(
node.lineno, node.col_offset, node.end_lineno, node.end_col_offset
)
custom_frame = self._custom_stack[-1]
custom_frame.event = event
custom_frame.focus = focus
custom_frame.node = node
custom_frame.node_tags = node.tags
if self._saved_states:
prev_state = self._saved_states[-1]
prev_state_frame = self._create_actual_active_frame(prev_state)
else:
prev_state = None
prev_state_frame = None
# store information about current statement / expression
if "statement" in event:
custom_frame.current_statement = focus
if event == "before_statement_again":
# keep the expression information from last event
pass
else:
custom_frame.current_root_expression = None
custom_frame.current_evaluations = []
else:
assert "expression" in event
assert prev_state_frame is not None
# see whether current_root_expression needs to be updated
prev_root_expression = prev_state_frame.current_root_expression
if event == "before_expression" and (
id(frame) != id(prev_state_frame.system_frame)
or "statement" in prev_state_frame.event
or not range_contains_smaller_or_equal(prev_root_expression, focus)
):
custom_frame.current_root_expression = focus
custom_frame.current_evaluations = []
if event == "after_expression" and "value" in args:
# value is missing in case of exception
custom_frame.current_evaluations.append(
(focus, self._vm.export_value(args["value"]))
)
# Save the snapshot.
# Check if we can share something with previous state
if (
prev_state is not None
and id(prev_state_frame.system_frame) == id(frame)
and prev_state["exception_value"] is self._get_current_exception()[1]
and prev_state["fresh_exception_id"] == id(self._fresh_exception)
and ("before" in event or "skipexport" in node.tags)
):
exception_info = prev_state["exception_info"]
# share the stack ...
stack = prev_state["stack"]
# ... but override certain things
active_frame_overrides = {
"event": custom_frame.event,
"focus": custom_frame.focus,
"node_tags": custom_frame.node_tags,
"current_root_expression": custom_frame.current_root_expression,
"current_evaluations": custom_frame.current_evaluations.copy(),
"current_statement": custom_frame.current_statement,
}
else:
# make full export
stack = self._export_stack()
exception_info = self._export_exception_info()
active_frame_overrides = {}
msg = {
"stack": stack,
"active_frame_overrides": active_frame_overrides,
"in_client_log": False,
"io_symbol_count": (
sys.stdin._processed_symbol_count
+ sys.stdout._processed_symbol_count
+ sys.stderr._processed_symbol_count
),
"exception_value": self._get_current_exception()[1],
"fresh_exception_id": id(self._fresh_exception),
"exception_info": exception_info,
}
self._saved_states.append(msg)
def _respond_to_commands(self):
"""Tries to respond to client commands with states collected so far.
Returns if these states don't suffice anymore and Python needs
to advance the program"""
# while the state for current index is already saved:
while self._current_state_index < len(self._saved_states):
state = self._saved_states[self._current_state_index]
# Get current state's most recent frame (together with overrides)
frame = self._create_actual_active_frame(state)
# Is this state meant to be seen?
if "skip_" + frame.event not in frame.node_tags:
# if True:
# Has the command completed?
tester = getattr(
self, "_cmd_" + self._current_command.name + "_completed"
)
cmd_complete = tester(frame, self._current_command)
if cmd_complete:
state["in_client_log"] = True
self._report_state(self._current_state_index)
self._current_command = self._fetch_next_debugger_command()
if self._current_command.name == "step_back":
if self._current_state_index == 0:
# Already in first state. Remain in this loop
pass
else:
assert self._current_state_index > 0
# Current event is no longer present in GUI "undo log"
self._saved_states[self._current_state_index][
"in_client_log"
] = False
self._current_state_index -= 1
else:
# Other commands move the pointer forward
self._current_state_index += 1
def _create_actual_active_frame(self, state):
return state["stack"][-1]._replace(**state["active_frame_overrides"])
def _report_state(self, state_index):
in_present = state_index == len(self._saved_states) - 1
if in_present:
# For reported new events re-export stack to make sure it is not shared.
# (There is tiny chance that sharing previous state
# after executing BinOp, Attribute, Compare or Subscript
# was not the right choice. See tag_nodes for more.)
# Re-exporting reduces the harm by showing correct data at least
# for present states.
self._saved_states[state_index]["stack"] = self._export_stack()
# need to make a copy for applying overrides
# and removing helper fields without modifying original
state = self._saved_states[state_index].copy()
state["stack"] = state["stack"].copy()
state["in_present"] = in_present
if not in_present:
# for past states fix the newest frame
state["stack"][-1] = self._create_actual_active_frame(state)
del state["exception_value"]
del state["active_frame_overrides"]
# Convert stack of TempFrameInfos to stack of FrameInfos
new_stack = []
for tframe in state["stack"]:
system_frame = tframe.system_frame
module_name = system_frame.f_globals["__name__"]
code_name = system_frame.f_code.co_name
source, firstlineno, in_library = self._vm._get_frame_source_info(
system_frame
)
assert firstlineno is not None, "no firstlineno for " + str(system_frame)
new_stack.append(
FrameInfo(
id=id(system_frame),
filename=system_frame.f_code.co_filename,
module_name=module_name,
code_name=code_name,
locals=tframe.locals,
globals=tframe.globals,
freevars=system_frame.f_code.co_freevars,
source=source,
lineno=system_frame.f_lineno,
firstlineno=firstlineno,
in_library=in_library,
event=tframe.event,
focus=tframe.focus,
node_tags=tframe.node_tags,
current_statement=tframe.current_statement,
current_evaluations=tframe.current_evaluations,
current_root_expression=tframe.current_root_expression,
)
)
state["stack"] = new_stack
state["tracer_class"] = "NiceTracer"
self._vm.send_message(DebuggerResponse(**state))
def _try_interpret_as_again_event(
self, frame, original_event, original_args, original_node
):
"""
Some after_* events can be interpreted also as
"before_*_again" events (eg. when last argument of a call was
evaluated, then we are just before executing the final stage of the call)
"""
if original_event == "after_expression":
value = original_args.get("value")
if (
"last_child" in original_node.tags
or "or_arg" in original_node.tags
and value
or "and_arg" in original_node.tags
and not value
):
# there may be explicit exceptions
if (
"skip_after_statement_again" in original_node.parent_node.tags
or "skip_after_expression_again" in original_node.parent_node.tags
):
return
# next step will be finalizing evaluation of parent of current expr
# so let's say we're before that parent expression
again_args = {"node_id": id(original_node.parent_node)}
again_event = (
"before_expression_again"
if "child_of_expression" in original_node.tags
else "before_statement_again"
)
self._handle_progress_event(
frame, again_event, again_args, original_node.parent_node
)
def _cmd_step_over_completed(self, frame, cmd):
"""
Identifies the moment when piece of code indicated by cmd.frame_id and cmd.focus
has completed execution (either successfully or not).
"""
if self._at_a_breakpoint(frame, cmd):
return True
# Make sure the correct frame_id is selected
if id(frame.system_frame) == cmd.frame_id:
# We're in the same frame
if "before_" in cmd.state:
if not range_contains_smaller_or_equal(cmd.focus, frame.focus):
# Focus has changed, command has completed
return True
else:
# Keep running
return False
elif "after_" in cmd.state:
if (
frame.focus != cmd.focus
or "before_" in frame.event
or "_expression" in cmd.state
and "_statement" in frame.event
or "_statement" in cmd.state
and "_expression" in frame.event
):
# The state has changed, command has completed
return True
else:
# Keep running
return False
else:
# We're in another frame
if self._frame_is_alive(cmd.frame_id):
# We're in a successor frame, keep running
return False
else:
# Original frame has completed, assumedly because of an exception
# We're done
return True
return True # not actually required, just to make Pylint happy
def _cmd_step_into_completed(self, frame, cmd):
return frame.event != "after_statement"
def _cmd_step_back_completed(self, frame, cmd):
# Check if the selected message has been previously sent to front-end
return (
self._saved_states[self._current_state_index]["in_client_log"]
or self._current_state_index == 0
)
def _cmd_step_out_completed(self, frame, cmd):
if self._current_state_index == 0:
return False
if frame.event == "after_statement":
return False
if self._at_a_breakpoint(frame, cmd):
return True
prev_state_frame = self._saved_states[self._current_state_index - 1]["stack"][
-1
]
return (
# the frame has completed
not self._frame_is_alive(cmd.frame_id)
# we're in the same frame but on higher level
# TODO: expression inside statement expression has same range as its parent
or id(frame.system_frame) == cmd.frame_id
and range_contains_smaller(frame.focus, cmd.focus)
# or we were there in prev state
or id(prev_state_frame.system_frame) == cmd.frame_id
and range_contains_smaller(prev_state_frame.focus, cmd.focus)
)
def _cmd_resume_completed(self, frame, cmd):
return self._at_a_breakpoint(frame, cmd)
def _cmd_run_to_cursor_completed(self, frame, cmd):
return self._at_a_breakpoint(
frame, cmd, self._get_breakpoints_with_cursor_position(cmd)
)
def _at_a_breakpoint(self, frame, cmd, breakpoints=None):
if breakpoints is None:
breakpoints = cmd["breakpoints"]
return (
frame.event in ["before_statement", "before_expression"]
and frame.system_frame.f_code.co_filename in breakpoints
and frame.focus.lineno in breakpoints[frame.system_frame.f_code.co_filename]
# consider only first event on a line
# (but take into account that same line may be reentered)
and (
cmd.focus is None
or (cmd.focus.lineno != frame.focus.lineno)
or (cmd.focus == frame.focus and cmd.state == frame.event)
or id(frame.system_frame) != cmd.frame_id
)
)
def _frame_is_alive(self, frame_id):
for frame in self._custom_stack:
if id(frame.system_frame) == frame_id:
return True
return False
def _export_stack(self):
result = []
exported_globals_per_module = {}
def export_globals(module_name, frame):
if module_name not in exported_globals_per_module:
exported_globals_per_module[module_name] = self._vm.export_variables(
frame.f_globals
)
return exported_globals_per_module[module_name]
for custom_frame in self._custom_stack:
system_frame = custom_frame.system_frame
module_name = system_frame.f_globals["__name__"]
result.append(
TempFrameInfo(
# need to store the reference to the frame to avoid it being GC-d
# otherwise frame id-s would be reused and this would
# mess up communication with the frontend.
system_frame=system_frame,
locals=None
if system_frame.f_locals is system_frame.f_globals
else self._vm.export_variables(system_frame.f_locals),
globals=export_globals(module_name, system_frame),
event=custom_frame.event,
focus=custom_frame.focus,
node_tags=custom_frame.node_tags,
current_evaluations=custom_frame.current_evaluations.copy(),
current_statement=custom_frame.current_statement,
current_root_expression=custom_frame.current_root_expression,
)
)
return result
def _thonny_hidden_before_stmt(self, node_id):
# The code to be debugged will be instrumented with this function
# inserted before each statement.
# Entry into this function indicates that statement as given
# by the code range is about to be evaluated next.
return None
def _thonny_hidden_after_stmt(self, node_id):
# The code to be debugged will be instrumented with this function
# inserted after each statement.
# Entry into this function indicates that statement as given
# by the code range was just executed successfully.
return None
def _thonny_hidden_before_expr(self, node_id):
# Entry into this function indicates that expression as given
# by the code range is about to be evaluated next
return node_id
def _thonny_hidden_after_expr(self, node_id, value):
# The code to be debugged will be instrumented with this function
# wrapped around each expression (given as 2nd argument).
# Entry into this function indicates that expression as given
# by the code range was just evaluated to given value
return value
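    # A rough sketch of the instrumented form these markers produce (for a
    # simple statement like ``x = a``, and assuming the *_MARKER constants
    # resolve to the helper names above):
    #     _thonny_hidden_before_stmt(<node_id of the assignment>)
    #     x = _thonny_hidden_after_expr(
    #         _thonny_hidden_before_expr(<node_id of a>), a)
    #     _thonny_hidden_after_stmt(<node_id of the assignment>)
    # Entering each marker function is what generates the corresponding
    # debugger event.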
def _tag_nodes(self, root):
"""Marks interesting properties of AST nodes"""
        # ast_utils needs to be imported after asttokens
        # is (custom-)imported
from thonny import ast_utils
def add_tag(node, tag):
if not hasattr(node, "tags"):
node.tags = set()
node.tags.add("class=" + node.__class__.__name__)
node.tags.add(tag)
# ignore module docstring if it is before from __future__ import
if (
isinstance(root.body[0], ast.Expr)
and isinstance(root.body[0].value, ast.Str)
and len(root.body) > 1
and isinstance(root.body[1], ast.ImportFrom)
and root.body[1].module == "__future__"
):
add_tag(root.body[0], "ignore")
add_tag(root.body[0].value, "ignore")
add_tag(root.body[1], "ignore")
for node in ast.walk(root):
if not isinstance(node, (ast.expr, ast.stmt)):
continue
# tag last children
last_child = ast_utils.get_last_child(node)
assert last_child in [True, False, None] or isinstance(
last_child, (ast.expr, ast.stmt, type(None))
), ("Bad last child " + str(last_child) + " of " + str(node))
if last_child is not None:
add_tag(node, "has_children")
if isinstance(last_child, ast.AST):
last_child.parent_node = node
add_tag(last_child, "last_child")
if isinstance(node, _ast.expr):
add_tag(last_child, "child_of_expression")
else:
add_tag(last_child, "child_of_statement")
if isinstance(node, ast.Call):
add_tag(last_child, "last_call_arg")
# other cases
if isinstance(node, ast.Call):
add_tag(node.func, "call_function")
node.func.parent_node = node
            if isinstance(node, ast.BoolOp) and isinstance(node.op, ast.Or):
for child in node.values:
add_tag(child, "or_arg")
child.parent_node = node
            if isinstance(node, ast.BoolOp) and isinstance(node.op, ast.And):
for child in node.values:
add_tag(child, "and_arg")
child.parent_node = node
# TODO: assert (it doesn't evaluate msg when test == True)
if isinstance(node, ast.Str):
add_tag(node, "StringLiteral")
add_tag(node, "skipexport")
if hasattr(ast, "JoinedStr") and isinstance(node, ast.JoinedStr):
# can't present children normally without
# ast giving correct locations for them
add_tag(node, "ignore_children")
elif isinstance(node, ast.Num):
add_tag(node, "NumberLiteral")
add_tag(node, "skipexport")
elif isinstance(node, ast.List):
add_tag(node, "skipexport")
elif isinstance(node, ast.Tuple):
add_tag(node, "skipexport")
elif isinstance(node, ast.Set):
add_tag(node, "skipexport")
elif isinstance(node, ast.Dict):
add_tag(node, "skipexport")
elif isinstance(node, ast.Name):
add_tag(node, "skipexport")
elif isinstance(node, ast.NameConstant):
add_tag(node, "skipexport")
elif isinstance(node, ast.Expr):
if not isinstance(node.value, (ast.Yield, ast.YieldFrom)):
add_tag(node, "skipexport")
elif isinstance(node, ast.If):
add_tag(node, "skipexport")
elif isinstance(node, ast.Return):
add_tag(node, "skipexport")
elif isinstance(node, ast.While):
add_tag(node, "skipexport")
elif isinstance(node, ast.Continue):
add_tag(node, "skipexport")
elif isinstance(node, ast.Break):
add_tag(node, "skipexport")
elif isinstance(node, ast.Pass):
add_tag(node, "skipexport")
elif isinstance(node, ast.For):
add_tag(node, "skipexport")
elif isinstance(node, ast.Try):
add_tag(node, "skipexport")
elif isinstance(node, ast.ListComp):
add_tag(node.elt, "ListComp.elt")
if len(node.generators) > 1:
add_tag(node, "ignore_children")
elif isinstance(node, ast.SetComp):
add_tag(node.elt, "SetComp.elt")
if len(node.generators) > 1:
add_tag(node, "ignore_children")
elif isinstance(node, ast.DictComp):
add_tag(node.key, "DictComp.key")
add_tag(node.value, "DictComp.value")
if len(node.generators) > 1:
add_tag(node, "ignore_children")
elif isinstance(node, ast.BinOp):
# TODO: use static analysis to detect type of left child
add_tag(node, "skipexport")
elif isinstance(node, ast.Attribute):
# TODO: use static analysis to detect type of left child
add_tag(node, "skipexport")
elif isinstance(node, ast.Subscript):
# TODO: use static analysis to detect type of left child
add_tag(node, "skipexport")
elif isinstance(node, ast.Compare):
# TODO: use static analysis to detect type of left child
add_tag(node, "skipexport")
if isinstance(node, ast.comprehension):
for expr in node.ifs:
add_tag(expr, "comprehension.if")
if isinstance(node, (ast.Assign)):
# value will be presented in assignment's before_statement_again
add_tag(node.value, "skip_after_expression")
if isinstance(
node, (ast.Expr, ast.While, ast.For, ast.If, ast.Try, ast.With)
):
add_tag(node, "skip_after_statement_again")
# make sure every node has this field
if not hasattr(node, "tags"):
node.tags = set()
def _should_instrument_as_expression(self, node):
return (
isinstance(node, _ast.expr)
and not getattr(node, "incorrect_range", False)
and "ignore" not in node.tags
and (not hasattr(node, "ctx") or isinstance(node.ctx, ast.Load))
# TODO: repeatedly evaluated subexpressions of comprehensions
# can be supported (but it requires some redesign both in backend and GUI)
and "ListComp.elt" not in node.tags
and "SetComp.elt" not in node.tags
and "DictComp.key" not in node.tags
and "DictComp.value" not in node.tags
and "comprehension.if" not in node.tags
)
def _should_instrument_as_statement(self, node):
return (
isinstance(node, _ast.stmt)
and not getattr(node, "incorrect_range", False)
and "ignore" not in node.tags
# Shouldn't insert anything before from __future__ import
# as this is not a normal statement
# https://bitbucket.org/plas/thonny/issues/183/thonny-throws-false-positive-syntaxerror
and (not isinstance(node, ast.ImportFrom) or node.module != "__future__")
)
def _insert_statement_markers(self, root):
# find lists of statements and insert before/after markers for each statement
for name, value in ast.iter_fields(root):
if isinstance(root, ast.Try) and name == "handlers":
# contains statements but is not statement itself
for handler in value:
self._insert_statement_markers(handler)
elif isinstance(value, ast.AST):
self._insert_statement_markers(value)
elif isinstance(value, list):
if len(value) > 0:
new_list = []
for node in value:
if self._should_instrument_as_statement(node):
# self._debug("EBFOMA", node)
# add before marker
new_list.append(
self._create_statement_marker(
node, BEFORE_STATEMENT_MARKER
)
)
# original statement
if self._should_instrument_as_statement(node):
self._insert_statement_markers(node)
new_list.append(node)
if (
self._should_instrument_as_statement(node)
and "skipexport" not in node.tags
):
# add after marker
new_list.append(
self._create_statement_marker(
node, AFTER_STATEMENT_MARKER
)
)
setattr(root, name, new_list)
def _create_statement_marker(self, node, function_name):
call = self._create_simple_marker_call(node, function_name)
stmt = ast.Expr(value=call)
ast.copy_location(stmt, node)
ast.fix_missing_locations(stmt)
return stmt
def _insert_for_target_markers(self, root):
"""inserts markers which notify assignment to for-loop variables"""
for node in ast.walk(root):
if isinstance(node, ast.For):
old_target = node.target
# print(vars(old_target))
temp_name = "__for_loop_var"
node.target = ast.Name(temp_name, ast.Store())
name_load = ast.Name(temp_name, ast.Load())
# value will be visible in parent's before_statement_again event
name_load.tags = {
"skip_before_expression",
"skip_after_expression",
"last_child",
}
name_load.lineno, name_load.col_offset = (
node.iter.lineno,
node.iter.col_offset,
)
name_load.end_lineno, name_load.end_col_offset = (
node.iter.end_lineno,
node.iter.end_col_offset,
)
before_name_load = self._create_simple_marker_call(
name_load, BEFORE_EXPRESSION_MARKER
)
after_name_load = ast.Call(
func=ast.Name(id=AFTER_EXPRESSION_MARKER, ctx=ast.Load()),
args=[before_name_load, name_load],
keywords=[],
)
ass = ast.Assign([old_target], after_name_load)
ass.lineno, ass.col_offset = old_target.lineno, old_target.col_offset
ass.end_lineno, ass.end_col_offset = (
node.iter.end_lineno,
node.iter.end_col_offset,
)
ass.tags = {
"skip_before_statement"
} # before_statement_again will be shown
name_load.parent_node = ass
ass_before = self._create_statement_marker(ass, BEFORE_STATEMENT_MARKER)
node.body.insert(0, ass_before)
node.body.insert(1, ass)
node.body.insert(
2, self._create_statement_marker(ass, AFTER_STATEMENT_MARKER)
)
ast.fix_missing_locations(node)
def _insert_expression_markers(self, node):
"""
TODO: this docstring is outdated
each expression e gets wrapped like this:
_after(_before(_loc, _node_is_zoomable), e, _node_role, _parent_range)
where
_after is function that gives the resulting value
_before is function that signals the beginning of evaluation of e
_loc gives the code range of e
_node_is_zoomable indicates whether this node has subexpressions
_node_role is either 'last_call_arg', 'last_op_arg', 'first_or_arg',
'first_and_arg', 'function' or None
"""
tracer = self
class ExpressionVisitor(ast.NodeTransformer):
def generic_visit(self, node):
if isinstance(node, _ast.expr):
if isinstance(node, ast.Starred):
# keep this node as is, but instrument its children
return ast.NodeTransformer.generic_visit(self, node)
elif tracer._should_instrument_as_expression(node):
# before marker
before_marker = tracer._create_simple_marker_call(
node, BEFORE_EXPRESSION_MARKER
)
ast.copy_location(before_marker, node)
if "ignore_children" in node.tags:
transformed_node = node
else:
transformed_node = ast.NodeTransformer.generic_visit(
self, node
)
# after marker
after_marker = ast.Call(
func=ast.Name(id=AFTER_EXPRESSION_MARKER, ctx=ast.Load()),
args=[before_marker, transformed_node],
keywords=[],
)
ast.copy_location(after_marker, node)
ast.fix_missing_locations(after_marker)
# further transformations may query original node location from after marker
if hasattr(node, "end_lineno"):
after_marker.end_lineno = node.end_lineno
after_marker.end_col_offset = node.end_col_offset
return after_marker
else:
# This expression (and its children) should be ignored
return node
else:
# Descend into statements
return ast.NodeTransformer.generic_visit(self, node)
return ExpressionVisitor().visit(node)
def _create_simple_marker_call(self, node, fun_name, extra_args=[]):
args = [self._export_node(node)] + extra_args
return ast.Call(
func=ast.Name(id=fun_name, ctx=ast.Load()), args=args, keywords=[]
)
def _export_node(self, node):
assert isinstance(node, (ast.expr, ast.stmt))
node_id = id(node)
self._nodes[node_id] = node
return ast.Num(node_id)
def _debug(self, *args):
logger.debug("TRACER: " + str(args))
def _execute_prepared_user_code(self, statements, expression, global_vars):
try:
return Tracer._execute_prepared_user_code(
self, statements, expression, global_vars
)
finally:
"""
from thonny.misc_utils import _win_get_used_memory
print("Memory:", _win_get_used_memory() / 1024 / 1024)
print("States:", len(self._saved_states))
print(self._fulltags.most_common())
"""
class CustomStackFrame:
def __init__(self, frame, event, focus=None):
self.system_frame = frame
self.event = event
self.focus = focus
self.current_evaluations = []
self.current_statement = None
self.current_root_expression = None
class FancySourceFileLoader(SourceFileLoader):
"""Used for loading and instrumenting user modules during fancy tracing"""
def __init__(self, fullname, path, tracer):
super().__init__(fullname, path)
self._tracer = tracer
def source_to_code(self, data, path, *, _optimize=-1):
old_tracer = sys.gettrace()
sys.settrace(None)
try:
root = self._tracer._prepare_ast(data, path, "exec")
return super().source_to_code(root, path)
finally:
sys.settrace(old_tracer)
def _get_frame_prefix(frame):
return str(id(frame)) + " " + ">" * len(inspect.getouterframes(frame, 0)) + " "
def _get_python_version_string(add_word_size=False):
result = ".".join(map(str, sys.version_info[:3]))
if sys.version_info[3] != "final":
result += "-" + sys.version_info[3]
if add_word_size:
result += " (" + ("64" if sys.maxsize > 2 ** 32 else "32") + " bit)"
return result
def _fetch_frame_source_info(frame):
if frame.f_code.co_filename is None or not os.path.exists(frame.f_code.co_filename):
return None, None, True
is_libra = _is_library_file(frame.f_code.co_filename)
if frame.f_code.co_name == "<module>":
# inspect.getsource and getsourcelines don't help here
with tokenize.open(frame.f_code.co_filename) as fp:
return fp.read(), 1, is_libra
else:
# function or class
try:
source = inspect.getsource(frame.f_code)
# inspect.getsource is not reliable, see eg:
# https://bugs.python.org/issue35101
# If the code name is not present as definition
# in the beginning of the source,
# then play safe and return the whole script
first_line = source.splitlines()[0]
if (
re.search(
r"\b(class|def)\b\s+\b%s\b" % frame.f_code.co_name, first_line
)
is None
):
with tokenize.open(frame.f_code.co_filename) as fp:
return fp.read(), 1, is_libra
else:
return source, frame.f_code.co_firstlineno, is_libra
except OSError:
logger.exception("Problem getting source")
return None, None, True
def format_exception_with_frame_info(
e_type, e_value, e_traceback, shorten_filenames=False
):
"""Need to suppress thonny frames to avoid confusion"""
_traceback_message = "Traceback (most recent call last):\n"
_cause_message = getattr(
traceback,
"_cause_message",
(
"\nThe above exception was the direct cause "
+ "of the following exception:\n\n"
),
)
_context_message = getattr(
traceback,
"_context_message",
(
"\nDuring handling of the above exception, "
+ "another exception occurred:\n\n"
),
)
def rec_format_exception_with_frame_info(etype, value, tb, chain=True):
# Based on
# https://www.python.org/dev/peps/pep-3134/#enhanced-reporting
# and traceback.format_exception
if etype is None:
etype = type(value)
if tb is None:
tb = value.__traceback__
if chain:
if value.__cause__ is not None:
yield from rec_format_exception_with_frame_info(
None, value.__cause__, None
)
yield (_cause_message, None, None, None)
elif value.__context__ is not None and not value.__suppress_context__:
yield from rec_format_exception_with_frame_info(
None, value.__context__, None
)
yield (_context_message, None, None, None)
if tb is not None:
yield (_traceback_message, None, None, None)
have_seen_first_relevant_frame = False
tb_temp = tb
for entry in traceback.extract_tb(tb):
assert tb_temp is not None # actual tb doesn't end before extract_tb
if (
"thonny/backend" not in entry.filename
and "thonny\\backend" not in entry.filename
and (
not entry.filename.endswith(os.sep + "ast.py")
or entry.name != "parse"
or etype is not SyntaxError
)
or have_seen_first_relevant_frame
or in_debug_mode()
):
have_seen_first_relevant_frame = True
fmt = ' File "{}", line {}, in {}\n'.format(
entry.filename, entry.lineno, entry.name
)
if entry.line:
fmt += " {}\n".format(entry.line.strip())
yield (fmt, id(tb_temp.tb_frame), entry.filename, entry.lineno)
tb_temp = tb_temp.tb_next
assert tb_temp is None # tb was exhausted
for line in traceback.format_exception_only(etype, value):
if etype is SyntaxError and line.endswith("^\n"):
# for some reason it may add several empty lines before ^-line
partlines = line.splitlines()
while len(partlines) >= 2 and partlines[-2].strip() == "":
del partlines[-2]
line = "\n".join(partlines) + "\n"
yield (line, None, None, None)
items = rec_format_exception_with_frame_info(e_type, e_value, e_traceback)
return list(items)
def in_debug_mode():
return os.environ.get("THONNY_DEBUG", False) in [1, "1", True, "True", "true"]
def _is_library_file(filename):
return (
filename is None
or path_startswith(filename, sys.prefix)
or hasattr(sys, "base_prefix")
and path_startswith(filename, sys.base_prefix)
or hasattr(sys, "real_prefix")
and path_startswith(filename, getattr(sys, "real_prefix"))
or site.ENABLE_USER_SITE
and path_startswith(filename, site.getusersitepackages())
)
def _report_internal_error():
print("PROBLEM WITH THONNY'S BACK-END:\n", file=sys.stderr)
traceback.print_exc()
def get_vm():
return _vm
|
updateikfiles.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2011 Masaho Ishida, Rosen Diankov <rosen.diankov@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""updates the cached ikfast files in the plugins/ikfastsolvers directory
To update openrave cached ik files run:
.. code-block:: bash
python updateikfiles.py --forcegenerate --destdir=../plugins/ikfastsolvers --testnum=100
"""
from numpy import *
from itertools import *
import time,platform,os,sys
import multiprocessing
from optparse import OptionParser
import logging
import pickle
from openravepy import *
from openravepy import ikfast
databases.inversekinematics.log.setLevel(logging.ERROR)
def updateik(robotfilename,manipname,iktype,destfilename=None,freeindices=None,results=None, do_test=True, forcegenerate=False, testnum=5000, delta='0.01'):
print robotfilename, manipname, iktype, destfilename
RaveInitialize()
env=Environment()
env.StopSimulation()
try:
with env:
robot = env.ReadRobotXMLFile(robotfilename,{'skipgeometry':'1'})
env.AddRobot(robot)
if manipname is not None:
manip = robot.SetActiveManipulator(manipname)
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot,iktype=iktype,freeindices=freeindices)
ikmodel.manip.SetIKSolver(None)
ikmodel.ikfast.log.setLevel(logging.ERROR)
if forcegenerate or not ikmodel.load():
ikmodel.autogenerate()
if freeindices is not None:
ikmodel.setrobot([float(delta)]*len(freeindices))
if do_test and testnum > 0:
successrate, wrongrate = ikmodel.testik(str(testnum))
if results is not None:
results[0].value = successrate
results[1].value = wrongrate
results[2].value = mean(ikmodel.perftiming(100000))
if destfilename is not None:
robotid = os.path.split(destfilename)[1][:-4]
code = """#define IKFAST_NAMESPACE %s
#include "plugindefs.h"
"""%robotid
sourcefilename = ikmodel.getsourcefilename(True)
if len(sourcefilename) == 0:
raise ValueError(u'robot %s manip %s cannot generate ik %s'%(robot.GetName(),manip.GetName(),iktype))
code += open(sourcefilename,'r').read()
code += """
#include "plugindefs.h"
namespace IKFAST_NAMESPACE {
IkSolverBasePtr CreateIkSolver(EnvironmentBasePtr penv, std::istream& sinput, const std::vector<dReal>& vfreeinc) {
boost::shared_ptr<ikfast::IkFastFunctions<IkReal> > ikfunctions(new ikfast::IkFastFunctions<IkReal>());
ikfunctions->_ComputeIk = IKFAST_NAMESPACE::ComputeIk;
ikfunctions->_ComputeFk = IKFAST_NAMESPACE::ComputeFk;
ikfunctions->_GetNumFreeParameters = IKFAST_NAMESPACE::GetNumFreeParameters;
ikfunctions->_GetFreeParameters = IKFAST_NAMESPACE::GetFreeParameters;
ikfunctions->_GetNumJoints = IKFAST_NAMESPACE::GetNumJoints;
ikfunctions->_GetIkRealSize = IKFAST_NAMESPACE::GetIkRealSize;
ikfunctions->_GetIkFastVersion = IKFAST_NAMESPACE::GetIkFastVersion;
ikfunctions->_GetIkType = IKFAST_NAMESPACE::GetIkType;
ikfunctions->_GetKinematicsHash = IKFAST_NAMESPACE::GetKinematicsHash;
return CreateIkFastSolver(penv,sinput,ikfunctions,vfreeinc);
}
} // end namespace
"""
print 'writing %s'%destfilename
open(destfilename,'w').write(code)
finally:
print "destroying environment"
env.Destroy()
def get_freeindies_combinations(robot_file, manip_name):
print robot_file, manip_name
    if manip_name is None:
return [None]
RaveInitialize()
env=Environment()
env.StopSimulation()
env.Load(robot_file)
robot=env.GetRobots()[0]
manip=robot.SetActiveManipulator(manip_name)
joints = manip.GetArmIndices()
if len(joints) <= 6:
freeindies_combination=[None]
else:
freeindies_combination = list(combinations(joints, len(joints)-6))
RaveDestroy()
return freeindies_combination
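# For example, a 7-joint manipulator leaves len(joints) - 6 == 1 free joint,
# so the combinations() call above yields the 7 possible one-element free-index
# tuples, while an arm with 6 or fewer joints simply returns [None].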
if __name__ == "__main__":
parser = OptionParser()
usage = "usage: %prog [options] <arg>"
parser = OptionParser(usage)
parser.add_option("-n", "--numthreads", dest="numthreads", default=multiprocessing.cpu_count(), help='the number of using core')
parser.add_option("-l", "--timelimit", dest="time_limit", default='1200', help='time to stop test ik.')
parser.add_option("-d", "--destdir", dest="destdir", default=None,
help='destination directory to save ik file results ')
parser.add_option("-r", "--robot", dest="robot", default=None, help='Robot file path')
parser.add_option("-m", "--manip", dest="manip", default=None, help='Manipulator')
parser.add_option("-t", "--type", dest="type", default=IkParameterization.Type.Transform6D, help='Ik type')
parser.add_option("--testnum", dest="testnum", type='int', default=5000, help='the number of ik test sets')
parser.add_option('--forcegenerate', dest='forcegenerate',action='store_true',default=False,help='if true will always force generation of ik')
parser.add_option("-e", "--delta", dest="delta", default='0.01', help='the step of free indies angle')
(options, args) = parser.parse_args()
numthreads=int(options.numthreads)
time_limit=int(options.time_limit) #seconds
robot_manip_type_fouts = None
    if (options.robot is not None and options.manip is None) or (options.robot is None and options.manip is not None):
        print 'both --robot and --manip must be specified together'
        sys.exit(0)
    elif options.robot is not None and options.manip is not None:
fout = os.path.splitext(os.path.basename(options.robot))[0]+'.cpp'
robot_manip_type_fouts = [[options.robot, options.manip, options.type, True, fout]]
# default files
if robot_manip_type_fouts is None:
robot_manip_type_fouts=[['robots/puma.robot.xml', None, IkParameterization.Type.Transform6D, False, 'ik_puma.cpp'],
['robots/barrettwam.robot.xml', None, IkParameterization.Type.Transform6D, False, 'ik_barrettwam.cpp'],
['robots/pa10schunk.robot.xml','arm',IkParameterization.Type.Transform6D, False, 'ik_pa10.cpp'],
['robots/pr2-beta-static.zae','head',IkParameterization.Type.Lookat3D, False, 'ik_pr2_head.cpp'],
['robots/pr2-beta-static.zae','head_torso',IkParameterization.Type.Lookat3D,False,'ik_pr2_head_torso.cpp'],
['robots/pr2-beta-static.zae','leftarm',IkParameterization.Type.Transform6D,False,'ik_pr2_leftarm.cpp'],
['robots/pr2-beta-static.zae','rightarm',IkParameterization.Type.Transform6D,False,'ik_pr2_rightarm.cpp'],
['robots/pr2-beta-static.zae','leftarm_torso',IkParameterization.Type.Transform6D,False,'ik_pr2_leftarm_torso.cpp'],
['robots/pr2-beta-static.zae','rightarm_torso',IkParameterization.Type.Transform6D,False,'ik_pr2_rightarm_torso.cpp'],
['robots/schunk-lwa3.zae',None,IkParameterization.Type.Transform6D,False,'ik_schunk_lwa3.cpp'],
['robots/neuronics-katana.zae','arm',IkParameterization.Type.TranslationDirection5D,False,'ik_katana5d.cpp'],
['robots/neuronics-katana.zae','armgrasp',IkParameterization.Type.Translation3D,False,'ik_katana5d_trans.cpp']]
# create all jobs/args
args = []
robotmanip_offsets = []
offset = 0
for robotfilename,manipname,iktype,testallindices,destfilename in robot_manip_type_fouts:
if testallindices:
freeindices_combs = get_freeindies_combinations(robotfilename,manipname)
else:
freeindices_combs = [None] # take best one
robotmanip_offsets.append([offset,len(freeindices_combs)])
offset += len(freeindices_combs)
for freeindices in freeindices_combs:
a={'robotfilename':robotfilename, 'manipname':manipname, 'iktype':iktype, 'freeindices':freeindices, 'testnum':options.testnum, 'delta':options.delta, 'forcegenerate':options.forcegenerate}
if destfilename is not None and options.destdir is not None:
a['destfilename'] = os.path.join(options.destdir, destfilename)
args.append(a)
finalresults=[None]*len(args)
timer=0
processes = []
try:
starttime=time.time()
for i in range (len(args)):
results = [multiprocessing.Value('f'),multiprocessing.Value('f'),multiprocessing.Value('f')]
results[0].value = 0
results[1].value = 0
results[2].value = 0
kwargs = dict(args[i])
kwargs['results'] = results
p = multiprocessing.Process(target=updateik, kwargs=kwargs)
print 'start process ('+str(i)+'/'+str(args[i])+')'
p.start()
p.endtime = (time.time()-starttime)+time_limit
p.index = i
p.results = results
p.kwargs = kwargs
processes.append(p)
# wait until thread finishes, or it times out
waituntilend = len(args)==i+1
while len(processes) >= numthreads or (waituntilend and len(processes)>0):
terminateprocesses = []
for p in processes:
if not p.is_alive():
finalresults[p.index] = [p.results[0].value,p.results[1].value,p.results[2].value]
print 'finished %s with %s rate'%(p.kwargs,finalresults[p.index])
processes.remove(p)
elif p.endtime < (time.time()-starttime):
terminateprocesses.append(p)
for p in terminateprocesses:
p.terminate()
p.join()
processes.remove(p)
if len(processes) >= numthreads or (waituntilend and len(processes)>0):
time.sleep(1)
#if None or 0.0, failed.
print finalresults
finally:
for p in processes:
p.terminate()
p.join()
saveresults = [[args[i], finalresults[i]] for i in range(len(finalresults)) if finalresults[i] is not None]
pickle.dump(saveresults,open(os.path.join(options.destdir, 'results.pp'),'w'))
print 'results: ',saveresults
    # select the free-indices combination with the highest success rate.
findices=[]
for offset,numjobs in robotmanip_offsets:
sorted_results=[[finalresults[offset+j],offset+j] for j in range(numjobs) if finalresults[offset+j] is not None]
sorted_results.sort(key=lambda k: k[0][2])
sorted_results.sort(key=lambda k: k[0][0],reverse=True)
findex = None
for [successrate,wrongrate,perftime],offset in sorted_results:
            if wrongrate <= 0.0 or wrongrate is None:
findex = offset
break
if findex is None:
raise ValueError('ik has failures: %r\n%r'%(args[offset], sorted_results))
findices.append(findex)
for i in findices:
try:
args[i]['forcegenerate'] = False
updateik(do_test=False,**args[i])
except Exception,e:
print e
            print 'error occurred in writing file %s.'%args[i]
|
plotting.py
|
"""PyVista plotting module."""
import collections.abc
import ctypes
from functools import wraps
import io
import logging
import os
import pathlib
import platform
import textwrap
from threading import Thread
import time
from typing import Dict
import warnings
import weakref
import numpy as np
import scooby
import pyvista
from pyvista import _vtk
from pyvista.utilities import (
abstract_class,
assert_empty_kwargs,
convert_array,
get_array,
is_pyvista_dataset,
numpy_to_texture,
raise_not_matching,
wrap,
)
from ..utilities.misc import PyvistaDeprecationWarning
from ..utilities.regression import image_from_window
from ._plotting import _has_matplotlib, prepare_smooth_shading, process_opacity
from .colors import Color, get_cmap_safe
from .export_vtkjs import export_plotter_vtkjs
from .mapper import make_mapper
from .picking import PickingHelper
from .render_window_interactor import RenderWindowInteractor
from .renderer import Camera, Renderer
from .renderers import Renderers
from .scalar_bars import ScalarBars
from .tools import FONTS, normalize, opacity_transfer_function, parse_font_family # noqa
from .widgets import WidgetHelper
SUPPORTED_FORMATS = [".png", ".jpeg", ".jpg", ".bmp", ".tif", ".tiff"]
VERY_FIRST_RENDER = True # windows plotter helper
# EXPERIMENTAL: permit pyvista to kill the render window
KILL_DISPLAY = platform.system() == 'Linux' and os.environ.get('PYVISTA_KILL_DISPLAY')
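# For example, launching with ``PYVISTA_KILL_DISPLAY=1 python script.py`` on
# Linux enables this behavior; any non-empty value of the variable is treated
# as truthy here.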
if KILL_DISPLAY: # pragma: no cover
# this won't work under wayland
try:
X11 = ctypes.CDLL("libX11.so")
X11.XCloseDisplay.argtypes = [ctypes.c_void_p]
except OSError:
warnings.warn('PYVISTA_KILL_DISPLAY: Unable to load X11.\nProbably using wayland')
KILL_DISPLAY = False
def close_all():
"""Close all open/active plotters and clean up memory.
Returns
-------
bool
``True`` when all plotters have been closed.
"""
for _, p in _ALL_PLOTTERS.items():
if not p._closed:
p.close()
p.deep_clean()
_ALL_PLOTTERS.clear()
return True
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
log.addHandler(logging.StreamHandler())
def _warn_xserver(): # pragma: no cover
"""Check if plotting is supported and persist this state.
Check once and cache this value between calls. Warn the user if
plotting is not supported. Configured to check on Linux and Mac
OS since the Windows check is not quick.
"""
    # disable the Windows check until we can get a fast way of verifying
    # whether Windows has a window manager (which it generally does)
if os.name == 'nt':
return
if not hasattr(_warn_xserver, 'has_support'):
_warn_xserver.has_support = pyvista.system_supports_plotting()
if not _warn_xserver.has_support:
# check if a display has been set
if 'DISPLAY' in os.environ:
return
# finally, check if using a backend that doesn't require an xserver
if pyvista.global_theme.jupyter_backend in ['ipygany', 'pythreejs']:
return
# Check if VTK has EGL support
ren_win_str = str(type(_vtk.vtkRenderWindow()))
if 'EGL' in ren_win_str or 'OSOpenGL' in ren_win_str:
return
warnings.warn(
'\n'
'This system does not appear to be running an xserver.\n'
'PyVista will likely segfault when rendering.\n\n'
'Try starting a virtual frame buffer with xvfb, or using\n '
' ``pyvista.start_xvfb()``\n'
)
USE_SCALAR_BAR_ARGS = """
"stitle" is a depreciated keyword and will be removed in a future
release.
Use ``scalar_bar_args`` instead. For example:
scalar_bar_args={'title': 'Scalar Bar Title'}
"""
@abstract_class
class BasePlotter(PickingHelper, WidgetHelper):
"""To be used by the Plotter and pyvistaqt.QtInteractor classes.
Parameters
----------
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a string descriptor as shape. E.g.:
* ``shape="3|1"`` means 3 plots on the left and 1 on the right,
* ``shape="4/2"`` means 4 plots on top and 2 at the bottom.
border : bool, optional
Draw a border around each render window. Default ``False``.
border_color : color_like, optional
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1.0, 1.0, 1.0]``
* ``color='#FFFFFF'``
border_width : float, optional
Width of the border in pixels when enabled.
title : str, optional
        Window title.
lighting : str, optional
What lighting to set up for the plotter.
Accepted options:
* ``'light_kit'``: a vtk Light Kit composed of 5 lights.
* ``'three lights'``: illumination using 3 lights.
* ``'none'``: no light sources at instantiation.
The default is a Light Kit (to be precise, 5 separate lights
that act like a Light Kit).
theme : pyvista.themes.DefaultTheme, optional
Plot-specific theme.
"""
mouse_position = None
click_position = None
def __init__(
self,
shape=(1, 1),
border=None,
border_color='k',
border_width=2.0,
title=None,
splitting_position=None,
groups=None,
row_weights=None,
col_weights=None,
lighting='light kit',
theme=None,
):
"""Initialize base plotter."""
log.debug('BasePlotter init start')
self._theme = pyvista.themes.DefaultTheme()
if theme is None:
# copy global theme to ensure local plot theme is fixed
# after creation.
self._theme.load_theme(pyvista.global_theme)
else:
if not isinstance(theme, pyvista.themes.DefaultTheme):
raise TypeError(
'Expected ``pyvista.themes.DefaultTheme`` for '
f'``theme``, not {type(theme).__name__}.'
)
self._theme.load_theme(theme)
self.image_transparent_background = self._theme.transparent_background
# optional function to be called prior to closing
self.__before_close_callback = None
self._store_image = False
self.mesh = None
if title is None:
title = self._theme.title
self.title = str(title)
# add renderers
self.renderers = Renderers(
self,
shape,
splitting_position,
row_weights,
col_weights,
groups,
border,
border_color,
border_width,
)
# This keeps track of scalars names already plotted and their ranges
self._scalar_bars = ScalarBars(self)
# track if the camera has been set up
self._first_time = True
# Keep track of the scale
# track if render window has ever been rendered
self._rendered = False
# this helps managing closed plotters
self._closed = False
# lighting style; be forgiving with input (accept underscores
# and ignore case)
lighting_normalized = str(lighting).replace('_', ' ').lower()
if lighting_normalized == 'light kit':
self.enable_lightkit()
elif lighting_normalized == 'three lights':
self.enable_3_lights()
elif lighting_normalized != 'none':
raise ValueError(f'Invalid lighting option "{lighting}".')
# Add self to open plotters
self._id_name = f"{hex(id(self))}-{len(_ALL_PLOTTERS)}"
_ALL_PLOTTERS[self._id_name] = self
# Key bindings
self.reset_key_events()
log.debug('BasePlotter init stop')
self._image_depth_null = None
self.last_image_depth = None
self.last_image = None
self._has_background_layer = False
# set hidden line removal based on theme
if self.theme.hidden_line_removal:
self.enable_hidden_line_removal()
# set antialiasing based on theme
if self.theme.antialiasing:
self.enable_anti_aliasing()
@property
def theme(self):
"""Return or set the theme used for this plotter.
Examples
--------
Use the dark theme for a plotter.
>>> import pyvista
>>> from pyvista import themes
>>> pl = pyvista.Plotter()
>>> pl.theme = themes.DarkTheme()
>>> actor = pl.add_mesh(pyvista.Sphere())
>>> pl.show()
"""
return self._theme
@theme.setter
def theme(self, theme):
if not isinstance(theme, pyvista.themes.DefaultTheme):
raise TypeError(
'Expected a pyvista theme like '
'``pyvista.themes.DefaultTheme``, '
f'not {type(theme).__name__}.'
)
self._theme.load_theme(theme)
def import_gltf(self, filename, set_camera=True):
"""Import a glTF file into the plotter.
See https://www.khronos.org/gltf/ for more information.
Parameters
----------
filename : str
Path to the glTF file.
set_camera : bool, optional
Set the camera viewing angle to one compatible with the
default three.js perspective (``'xy'``).
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> helmet_file = examples.gltf.download_damaged_helmet() # doctest:+SKIP
>>> texture = examples.hdr.download_dikhololo_night() # doctest:+SKIP
>>> pl = pyvista.Plotter() # doctest:+SKIP
>>> pl.import_gltf(helmet_file) # doctest:+SKIP
        >>> pl.set_environment_texture(texture)  # doctest:+SKIP
>>> pl.camera.zoom(1.8) # doctest:+SKIP
>>> pl.show() # doctest:+SKIP
See :ref:`load_gltf` for a full example using this method.
"""
if not _vtk.VTK9: # pragma: no cover
from pyvista.core.errors import VTKVersionError
raise VTKVersionError('Support for glTF requires VTK v9 or newer')
filename = os.path.abspath(os.path.expanduser(str(filename)))
if not os.path.isfile(filename):
raise FileNotFoundError(f'Unable to locate {filename}')
# lazy import here to avoid importing unused modules
from vtkmodules.vtkIOImport import vtkGLTFImporter
importer = vtkGLTFImporter()
importer.SetFileName(filename)
importer.SetRenderWindow(self.ren_win)
importer.Update()
# register last actor in actors
actor = self.renderer.GetActors().GetLastItem()
name = actor.GetAddressAsString("")
self.renderer._actors[name] = actor
# set camera position to a three.js viewing perspective
if set_camera:
self.camera_position = 'xy'
def export_html(self, filename):
"""Export this plotter as an interactive scene to a HTML file.
Parameters
----------
filename : str
Path to export the html file to.
Notes
-----
You will need ``ipywidgets`` and ``pythreejs`` installed for
this feature.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_uniform()
>>> pl = pyvista.Plotter(shape=(1,2))
>>> _ = pl.add_mesh(mesh, scalars='Spatial Point Data', show_edges=True)
>>> pl.subplot(0,1)
>>> _ = pl.add_mesh(mesh, scalars='Spatial Cell Data', show_edges=True)
>>> pl.export_html('pyvista.html') # doctest:+SKIP
"""
pythreejs_renderer = self.to_pythreejs()
# import after converting as we check for pythreejs import first
try:
from ipywidgets.embed import embed_minimal_html
except ImportError: # pragma: no cover
raise ImportError('Please install ipywidgets with:\n' '\n\tpip install ipywidgets')
# convert and write to file
embed_minimal_html(filename, views=[pythreejs_renderer], title=self.title)
def to_pythreejs(self):
"""Convert this plotting scene to a pythreejs renderer.
Returns
-------
ipywidgets.Widget
Widget containing pythreejs renderer.
"""
self._on_first_render_request() # set up camera
from pyvista.jupyter.pv_pythreejs import convert_plotter
return convert_plotter(self)
def export_gltf(self, filename, inline_data=True, rotate_scene=True, save_normals=True):
"""Export the current rendering scene as a glTF file.
Visit https://gltf-viewer.donmccurdy.com/ for an online viewer.
See https://vtk.org/doc/nightly/html/classvtkGLTFExporter.html
for limitations regarding the exporter.
Parameters
----------
filename : str
Path to export the gltf file to.
inline_data : bool, optional
            Sets whether the binary data is included in the json file as a
            base64 string. When ``True``, only one file is exported.
rotate_scene : bool, optional
Rotate scene to be compatible with the glTF specifications.
save_normals : bool, optional
Saves the point array ``'Normals'`` as ``'NORMAL'`` in
            the exported scene.
Examples
--------
Output a simple point cloud represented as balls.
>>> import numpy as np
>>> import pyvista
>>> point_cloud = np.random.random((100, 3))
>>> pdata = pyvista.PolyData(point_cloud)
>>> pdata['orig_sphere'] = np.arange(100)
>>> sphere = pyvista.Sphere(radius=0.02)
>>> pc = pdata.glyph(scale=False, geom=sphere)
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pc, cmap='reds', smooth_shading=True,
... show_scalar_bar=False)
>>> pl.export_gltf('balls.gltf') # doctest:+SKIP
>>> pl.show()
Output the orientation plotter.
>>> from pyvista import demos
>>> pl = demos.orientation_plotter()
>>> pl.export_gltf('orientation_plotter.gltf') # doctest:+SKIP
>>> pl.show()
"""
if not _vtk.VTK9: # pragma: no cover
from pyvista.core.errors import VTKVersionError
raise VTKVersionError('Support for glTF requires VTK v9 or newer')
if not hasattr(self, "ren_win"):
raise RuntimeError('This plotter has been closed and is unable to export the scene.')
from vtkmodules.vtkIOExport import vtkGLTFExporter
# rotate scene to gltf compatible view
renamed_arrays = [] # any renamed normal arrays
if rotate_scene:
for renderer in self.renderers:
for actor in renderer.actors.values():
if hasattr(actor, 'RotateX'):
actor.RotateX(-90)
actor.RotateZ(-90)
if save_normals:
try:
mapper = actor.GetMapper()
if mapper is None:
continue
dataset = mapper.GetInputAsDataSet()
if 'Normals' in dataset.point_data:
# By default VTK uses the 'Normals' point data for normals
# but gLTF uses NORMAL.
point_data = dataset.GetPointData()
array = point_data.GetArray('Normals')
array.SetName('NORMAL')
renamed_arrays.append(array)
except: # noqa: E722
pass
exporter = vtkGLTFExporter()
exporter.SetRenderWindow(self.ren_win)
exporter.SetFileName(filename)
exporter.SetInlineData(inline_data)
exporter.SetSaveNormal(save_normals)
exporter.Update()
# rotate back if applicable
if rotate_scene:
for renderer in self.renderers:
for actor in renderer.actors.values():
if hasattr(actor, 'RotateX'):
actor.RotateZ(90)
actor.RotateX(90)
# revert any renamed arrays
for array in renamed_arrays:
array.SetName('Normals')
def enable_hidden_line_removal(self, all_renderers=True):
"""Enable hidden line removal.
Wireframe geometry will be drawn using hidden line removal if
the rendering engine supports it.
Disable this with :func:`disable_hidden_line_removal
<BasePlotter.disable_hidden_line_removal>`
Parameters
----------
all_renderers : bool
If ``True``, applies to all renderers in subplots. If
``False``, then only applies to the active renderer.
Examples
--------
Create a side-by-side plotter and render a sphere in wireframe
with hidden line removal enabled on the left and disabled on
the right.
>>> import pyvista
>>> sphere = pyvista.Sphere(theta_resolution=20, phi_resolution=20)
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> _ = pl.add_mesh(sphere, line_width=3, style='wireframe')
>>> _ = pl.add_text("With hidden line removal")
>>> pl.enable_hidden_line_removal(all_renderers=False)
>>> pl.subplot(0, 1)
>>> pl.disable_hidden_line_removal(all_renderers=False)
>>> _ = pl.add_mesh(sphere, line_width=3, style='wireframe')
>>> _ = pl.add_text("Without hidden line removal")
>>> pl.show()
"""
if all_renderers:
for renderer in self.renderers:
renderer.enable_hidden_line_removal()
else:
self.renderer.enable_hidden_line_removal()
def disable_hidden_line_removal(self, all_renderers=True):
"""Disable hidden line removal.
Enable again with :func:`enable_hidden_line_removal
<BasePlotter.enable_hidden_line_removal>`
Parameters
----------
all_renderers : bool
If ``True``, applies to all renderers in subplots. If
``False``, then only applies to the active renderer.
Examples
--------
Enable and then disable hidden line removal.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.enable_hidden_line_removal()
>>> pl.disable_hidden_line_removal()
"""
if all_renderers:
for renderer in self.renderers:
renderer.disable_hidden_line_removal()
else:
self.renderer.disable_hidden_line_removal()
@property
def scalar_bar(self):
"""First scalar bar. Kept for backwards compatibility."""
return list(self.scalar_bars.values())[0]
@property
def scalar_bars(self):
"""Scalar bars.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere)
>>> plotter.scalar_bars
Scalar Bar Title Interactive
"Data" False
Select a scalar bar actor based on the title of the bar.
>>> plotter.scalar_bars['Data'] # doctest:+SKIP
(vtkmodules.vtkRenderingAnnotation.vtkScalarBarActor)0x7fcd3567ca00
"""
return self._scalar_bars
@property
def _before_close_callback(self):
"""Return the cached function (expecting a reference)."""
if self.__before_close_callback is not None:
return self.__before_close_callback()
@_before_close_callback.setter
def _before_close_callback(self, func):
"""Store a weakref.ref of the function being called."""
if func is not None:
self.__before_close_callback = weakref.ref(func)
else:
self.__before_close_callback = None
@property
def shape(self):
"""Shape of the plotter.
Examples
--------
Return the plotter shape.
>>> import pyvista
>>> plotter = pyvista.Plotter(shape=(2, 2))
>>> plotter.shape
(2, 2)
"""
return self.renderers._shape
@property
def renderer(self):
"""Return the active renderer.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.renderer # doctest:+SKIP
(Renderer)0x7f916129bfa0
"""
return self.renderers.active_renderer
@property
def store_image(self):
"""Store last rendered frame on close.
        This is normally disabled to avoid caching the image. It is
        enabled by default when building the documentation gallery, i.e. when
        ``pyvista.BUILDING_GALLERY = True``.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter(off_screen=True)
>>> pl.store_image = True
>>> _ = pl.add_mesh(pyvista.Cube())
>>> pl.show()
>>> image = pl.last_image
>>> type(image) # doctest:+SKIP
<class 'numpy.ndarray'>
"""
return self._store_image
@store_image.setter
def store_image(self, value):
"""Store last rendered frame on close."""
self._store_image = bool(value)
def subplot(self, index_row, index_column=None):
"""Set the active subplot.
Parameters
----------
index_row : int
Index of the subplot to activate along the rows.
index_column : int
Index of the subplot to activate along the columns.
Examples
--------
Create a 2 wide plot and set the background of right-hand plot
to orange. Add a cube to the left plot and a sphere to the
right.
>>> import pyvista
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> actor = pl.add_mesh(pyvista.Cube())
>>> pl.subplot(0, 1)
>>> actor = pl.add_mesh(pyvista.Sphere())
>>> pl.set_background('orange', all_renderers=False)
>>> pl.show()
"""
self.renderers.set_active_renderer(index_row, index_column)
@wraps(Renderer.add_legend)
def add_legend(self, *args, **kwargs):
"""Wrap ``Renderer.add_legend``."""
return self.renderer.add_legend(*args, **kwargs)
@wraps(Renderer.remove_legend)
def remove_legend(self, *args, **kwargs):
"""Wrap ``Renderer.remove_legend``."""
return self.renderer.remove_legend(*args, **kwargs)
@property
def legend(self):
"""Legend actor.
There can only be one legend actor per renderer. If
``legend`` is ``None``, there is no legend actor.
"""
return self.renderer.legend
@wraps(Renderer.add_floor)
def add_floor(self, *args, **kwargs):
"""Wrap ``Renderer.add_floor``."""
return self.renderer.add_floor(*args, **kwargs)
@wraps(Renderer.remove_floors)
def remove_floors(self, *args, **kwargs):
"""Wrap ``Renderer.remove_floors``."""
return self.renderer.remove_floors(*args, **kwargs)
def enable_3_lights(self, only_active=False):
"""Enable 3-lights illumination.
This will replace all pre-existing lights in the scene.
Parameters
----------
only_active : bool
If ``True``, only change the active renderer. The default
is that every renderer is affected.
Examples
--------
>>> from pyvista import demos
>>> pl = demos.orientation_plotter()
>>> pl.enable_3_lights()
>>> pl.show()
Note how this varies from the default plotting.
>>> pl = demos.orientation_plotter()
>>> pl.show()
"""
def _to_pos(elevation, azimuth):
theta = azimuth * np.pi / 180.0
phi = (90.0 - elevation) * np.pi / 180.0
x = np.sin(theta) * np.sin(phi)
y = np.cos(phi)
z = np.cos(theta) * np.sin(phi)
return x, y, z
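        # _to_pos converts (elevation, azimuth) in degrees to a unit-sphere
        # position; for instance _to_pos(45.0, 45.0) is roughly (0.5, 0.707, 0.5).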
renderers = [self.renderer] if only_active else self.renderers
for renderer in renderers:
renderer.remove_all_lights()
        # Inspired by Mayavi's version of Raymond Maple's 3-lights illumination
intensities = [1, 0.6, 0.5]
all_angles = [(45.0, 45.0), (-30.0, -60.0), (-30.0, 60.0)]
for intensity, angles in zip(intensities, all_angles):
light = pyvista.Light(light_type='camera light')
light.intensity = intensity
light.position = _to_pos(*angles)
for renderer in renderers:
renderer.add_light(light)
def disable_3_lights(self):
"""Please use ``enable_lightkit``, this method has been depreciated."""
from pyvista.core.errors import DeprecationError
raise DeprecationError('DEPRECATED: Please use ``enable_lightkit``')
def enable_lightkit(self, only_active=False):
"""Enable the default light-kit lighting.
See:
https://www.researchgate.net/publication/2926068
This will replace all pre-existing lights in the renderer.
Parameters
----------
only_active : bool
If ``True``, only change the active renderer. The default is that
every renderer is affected.
Examples
--------
Create a plotter without any lights and then enable the
default light kit.
>>> import pyvista
>>> pl = pyvista.Plotter(lighting=None)
>>> pl.enable_lightkit()
>>> actor = pl.add_mesh(pyvista.Cube(), show_edges=True)
>>> pl.show()
"""
renderers = [self.renderer] if only_active else self.renderers
light_kit = _vtk.vtkLightKit()
for renderer in renderers:
renderer.remove_all_lights()
# Use the renderer as a vtkLightKit parser.
# Feed it the LightKit, pop off the vtkLights, put back
# pyvista Lights. This is the price we must pay for using
# inheritance rather than composition.
light_kit.AddLightsToRenderer(renderer)
vtk_lights = renderer.lights
renderer.remove_all_lights()
for vtk_light in vtk_lights:
light = pyvista.Light.from_vtk(vtk_light)
renderer.add_light(light)
renderer.LightFollowCameraOn()
@wraps(Renderer.enable_anti_aliasing)
def enable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.enable_anti_aliasing``."""
for renderer in self.renderers:
renderer.enable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.disable_anti_aliasing)
def disable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.disable_anti_aliasing``."""
self.renderer.disable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.set_focus)
def set_focus(self, *args, render=True, **kwargs):
"""Wrap ``Renderer.set_focus``."""
log.debug('set_focus: %s, %s', str(args), str(kwargs))
self.renderer.set_focus(*args, **kwargs)
if render:
self.render()
@wraps(Renderer.set_position)
def set_position(self, *args, render=True, **kwargs):
"""Wrap ``Renderer.set_position``."""
self.renderer.set_position(*args, **kwargs)
if render:
self.render()
@wraps(Renderer.set_viewup)
def set_viewup(self, *args, render=True, **kwargs):
"""Wrap ``Renderer.set_viewup``."""
self.renderer.set_viewup(*args, **kwargs)
if render:
self.render()
@wraps(Renderer.add_orientation_widget)
def add_orientation_widget(self, *args, **kwargs):
"""Wrap ``Renderer.add_orientation_widget``."""
return self.renderer.add_orientation_widget(*args, **kwargs)
@wraps(Renderer.add_axes)
def add_axes(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes``."""
return self.renderer.add_axes(*args, **kwargs)
@wraps(Renderer.hide_axes)
def hide_axes(self, *args, **kwargs):
"""Wrap ``Renderer.hide_axes``."""
return self.renderer.hide_axes(*args, **kwargs)
@wraps(Renderer.show_axes)
def show_axes(self, *args, **kwargs):
"""Wrap ``Renderer.show_axes``."""
return self.renderer.show_axes(*args, **kwargs)
@wraps(Renderer.update_bounds_axes)
def update_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.update_bounds_axes``."""
return self.renderer.update_bounds_axes(*args, **kwargs)
@wraps(Renderer.add_chart)
def add_chart(self, *args, **kwargs):
"""Wrap ``Renderer.add_chart``."""
return self.renderer.add_chart(*args, **kwargs)
@wraps(Renderer.remove_chart)
def remove_chart(self, *args, **kwargs):
"""Wrap ``Renderer.remove_chart``."""
return self.renderer.remove_chart(*args, **kwargs)
@wraps(Renderer.add_actor)
def add_actor(self, *args, **kwargs):
"""Wrap ``Renderer.add_actor``."""
return self.renderer.add_actor(*args, **kwargs)
@wraps(Renderer.enable_parallel_projection)
def enable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.enable_parallel_projection``."""
return self.renderer.enable_parallel_projection(*args, **kwargs)
@wraps(Renderer.disable_parallel_projection)
def disable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.disable_parallel_projection``."""
return self.renderer.disable_parallel_projection(*args, **kwargs)
@wraps(Renderer.enable_shadows)
def enable_shadows(self, *args, **kwargs):
"""Wrap ``Renderer.enable_shadows``."""
return self.renderer.enable_shadows(*args, **kwargs)
@wraps(Renderer.disable_shadows)
def disable_shadows(self, *args, **kwargs):
"""Wrap ``Renderer.disable_shadows``."""
return self.renderer.disable_shadows(*args, **kwargs)
@property
def parallel_projection(self):
"""Return parallel projection state of active render window."""
return self.renderer.parallel_projection
@parallel_projection.setter
def parallel_projection(self, state):
"""Set parallel projection state of all active render windows."""
self.renderer.parallel_projection = state
@property
def parallel_scale(self):
"""Return parallel scale of active render window."""
return self.renderer.parallel_scale
@parallel_scale.setter
def parallel_scale(self, value):
"""Set parallel scale of all active render windows."""
self.renderer.parallel_scale = value
@wraps(Renderer.add_axes_at_origin)
def add_axes_at_origin(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes_at_origin``."""
return self.renderer.add_axes_at_origin(*args, **kwargs)
@wraps(Renderer.show_bounds)
def show_bounds(self, *args, **kwargs):
"""Wrap ``Renderer.show_bounds``."""
return self.renderer.show_bounds(*args, **kwargs)
@wraps(Renderer.add_bounding_box)
def add_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.add_bounding_box``."""
return self.renderer.add_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounding_box)
def remove_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounding_box``."""
return self.renderer.remove_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounds_axes)
def remove_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounds_axes``."""
return self.renderer.remove_bounds_axes(*args, **kwargs)
@wraps(Renderer.show_grid)
def show_grid(self, *args, **kwargs):
"""Wrap ``Renderer.show_grid``."""
return self.renderer.show_grid(*args, **kwargs)
@wraps(Renderer.set_scale)
def set_scale(self, *args, **kwargs):
"""Wrap ``Renderer.set_scale``."""
return self.renderer.set_scale(*args, **kwargs)
@wraps(Renderer.enable_eye_dome_lighting)
def enable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.enable_eye_dome_lighting``."""
return self.renderer.enable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.disable_eye_dome_lighting)
def disable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.disable_eye_dome_lighting``."""
self.renderer.disable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.reset_camera)
def reset_camera(self, *args, **kwargs):
"""Wrap ``Renderer.reset_camera``."""
self.renderer.reset_camera(*args, **kwargs)
self.render()
@wraps(Renderer.isometric_view)
def isometric_view(self, *args, **kwargs):
"""Wrap ``Renderer.isometric_view``."""
self.renderer.isometric_view(*args, **kwargs)
@wraps(Renderer.view_isometric)
def view_isometric(self, *args, **kwarg):
"""Wrap ``Renderer.view_isometric``."""
self.renderer.view_isometric(*args, **kwarg)
@wraps(Renderer.view_vector)
def view_vector(self, *args, **kwarg):
"""Wrap ``Renderer.view_vector``."""
self.renderer.view_vector(*args, **kwarg)
@wraps(Renderer.view_xy)
def view_xy(self, *args, **kwarg):
"""Wrap ``Renderer.view_xy``."""
self.renderer.view_xy(*args, **kwarg)
@wraps(Renderer.view_yx)
def view_yx(self, *args, **kwarg):
"""Wrap ``Renderer.view_yx``."""
self.renderer.view_yx(*args, **kwarg)
@wraps(Renderer.view_xz)
def view_xz(self, *args, **kwarg):
"""Wrap ``Renderer.view_xz``."""
self.renderer.view_xz(*args, **kwarg)
@wraps(Renderer.view_zx)
def view_zx(self, *args, **kwarg):
"""Wrap ``Renderer.view_zx``."""
self.renderer.view_zx(*args, **kwarg)
@wraps(Renderer.view_yz)
def view_yz(self, *args, **kwarg):
"""Wrap ``Renderer.view_yz``."""
self.renderer.view_yz(*args, **kwarg)
@wraps(Renderer.view_zy)
def view_zy(self, *args, **kwarg):
"""Wrap ``Renderer.view_zy``."""
self.renderer.view_zy(*args, **kwarg)
@wraps(Renderer.disable)
def disable(self, *args, **kwarg):
"""Wrap ``Renderer.disable``."""
self.renderer.disable(*args, **kwarg)
@wraps(Renderer.enable)
def enable(self, *args, **kwarg):
"""Wrap ``Renderer.enable``."""
self.renderer.enable(*args, **kwarg)
@wraps(Renderer.enable_depth_peeling)
def enable_depth_peeling(self, *args, **kwargs):
"""Wrap ``Renderer.enable_depth_peeling``."""
if hasattr(self, 'ren_win'):
result = self.renderer.enable_depth_peeling(*args, **kwargs)
if result:
self.ren_win.AlphaBitPlanesOn()
return result
@wraps(Renderer.disable_depth_peeling)
def disable_depth_peeling(self):
"""Wrap ``Renderer.disable_depth_peeling``."""
if hasattr(self, 'ren_win'):
self.ren_win.AlphaBitPlanesOff()
return self.renderer.disable_depth_peeling()
@wraps(Renderer.get_default_cam_pos)
def get_default_cam_pos(self, *args, **kwargs):
"""Wrap ``Renderer.get_default_cam_pos``."""
return self.renderer.get_default_cam_pos(*args, **kwargs)
@wraps(Renderer.remove_actor)
def remove_actor(self, *args, **kwargs):
"""Wrap ``Renderer.remove_actor``."""
for renderer in self.renderers:
renderer.remove_actor(*args, **kwargs)
return True
@wraps(Renderer.set_environment_texture)
def set_environment_texture(self, *args, **kwargs):
"""Wrap ``Renderer.set_environment_texture``."""
return self.renderer.set_environment_texture(*args, **kwargs)
#### Properties from Renderer ####
@property
def camera(self):
"""Return the active camera of the active renderer."""
if not self.camera_set:
self.camera_position = self.get_default_cam_pos()
self.reset_camera()
self.camera_set = True
return self.renderer.camera
@camera.setter
def camera(self, camera):
"""Set the active camera for the rendering scene."""
self.renderer.camera = camera
@property
def camera_set(self):
"""Return if the camera of the active renderer has been set."""
return self.renderer.camera_set
@camera_set.setter
def camera_set(self, is_set):
"""Set if the camera has been set on the active renderer."""
self.renderer.camera_set = is_set
@property
def bounds(self):
"""Return the bounds of the active renderer."""
return self.renderer.bounds
@property
def length(self):
"""Return the length of the diagonal of the bounding box of the scene."""
return self.renderer.length
@property
def center(self):
"""Return the center of the active renderer."""
return self.renderer.center
@property
def _scalar_bar_slots(self):
"""Return the scalar bar slots of the active renderer."""
return self.renderer._scalar_bar_slots
@_scalar_bar_slots.setter
def _scalar_bar_slots(self, value):
"""Set the scalar bar slots of the active renderer."""
self.renderer._scalar_bar_slots = value
@property
def _scalar_bar_slot_lookup(self):
"""Return the scalar bar slot lookup of the active renderer."""
return self.renderer._scalar_bar_slot_lookup
@_scalar_bar_slot_lookup.setter
def _scalar_bar_slot_lookup(self, value):
"""Set the scalar bar slot lookup of the active renderer."""
self.renderer._scalar_bar_slot_lookup = value
@property
def scale(self):
"""Return the scaling of the active renderer."""
return self.renderer.scale
@scale.setter
def scale(self, scale):
"""Set the scaling of the active renderer."""
self.renderer.set_scale(*scale)
@property
def camera_position(self):
"""Return camera position of the active render window."""
return self.renderer.camera_position
@camera_position.setter
def camera_position(self, camera_location):
"""Set camera position of the active render window."""
self.renderer.camera_position = camera_location
@property
def background_color(self):
"""Return the background color of the active render window."""
return self.renderers.active_renderer.background_color
@background_color.setter
def background_color(self, color):
"""Set the background color of all the render windows."""
self.set_background(color)
@property
def window_size(self):
"""Return the render window size in ``(width, height)``.
Examples
--------
Change the window size from ``200 x 200`` to ``400 x 400``.
>>> import pyvista
>>> pl = pyvista.Plotter(window_size=[200, 200])
>>> pl.window_size
[200, 200]
>>> pl.window_size = [400, 400]
>>> pl.window_size
[400, 400]
"""
return list(self.ren_win.GetSize())
@window_size.setter
def window_size(self, window_size):
"""Set the render window size."""
self.ren_win.SetSize(window_size[0], window_size[1])
@property
def image_depth(self):
"""Return a depth image representing current render window.
Helper attribute for ``get_image_depth``.
"""
return self.get_image_depth()
def _check_rendered(self):
"""Check if the render window has been shown and raise an exception if not."""
if not self._rendered:
raise AttributeError(
'\nThis plotter has not yet been set up and rendered '
'with ``show()``.\n'
'Consider setting ``off_screen=True`` '
'for off screen rendering.\n'
)
def _check_has_ren_win(self):
"""Check if render window attribute exists and raise an exception if not."""
if not hasattr(self, 'ren_win'):
raise AttributeError(
'\n\nTo retrieve an image after the render window '
'has been closed, set:\n\n'
' ``plotter.store_image = True``\n\n'
'before closing the plotter.'
)
@property
def image(self):
"""Return an image array of current render window.
To retrieve an image after the render window has been closed,
set: ``plotter.store_image = True`` before closing the plotter.
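Examples
--------
A minimal sketch, assuming off screen rendering is available;
``store_image`` keeps the last frame so it can be read back after
``show``:
>>> import pyvista
>>> pl = pyvista.Plotter(off_screen=True)
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.store_image = True
>>> pl.show()
>>> img = pl.image  # ``(height, width, 3)`` uint8 array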
"""
if not hasattr(self, 'ren_win') and self.last_image is not None:
return self.last_image
self._check_rendered()
self._check_has_ren_win()
data = image_from_window(self.ren_win)
if self.image_transparent_background:
return data
# ignore alpha channel
return data[:, :, :-1]
def render(self):
"""Render the main window.
Does nothing until ``show`` has been called.
"""
if hasattr(self, 'ren_win') and not self._first_time:
log.debug('Rendering')
self.ren_win.Render()
self._rendered = True
@wraps(RenderWindowInteractor.add_key_event)
def add_key_event(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.add_key_event."""
if hasattr(self, 'iren'):
self.iren.add_key_event(*args, **kwargs)
def clear_events_for_key(self, key):
"""Remove the callbacks associated to the key.
Parameters
----------
key : str
Key to clear events for.
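Examples
--------
A small illustrative sketch, removing the default binding attached
to the ``q`` key:
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.clear_events_for_key('q')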
"""
self.iren.clear_events_for_key(key)
def store_mouse_position(self, *args):
"""Store mouse position."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.mouse_position = self.iren.get_event_position()
def store_click_position(self, *args):
"""Store click position in viewport coordinates."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.click_position = self.iren.get_event_position()
self.mouse_position = self.click_position
def track_mouse_position(self):
"""Keep track of the mouse position.
This will potentially slow down the interactor. No callbacks
supported here - use
:func:`pyvista.BasePlotter.track_click_position` instead.
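Examples
--------
An illustrative sketch; after interaction the most recent position
is stored in ``mouse_position``:
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.track_mouse_position()
>>> pl.show()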
"""
self.iren.track_mouse_position(self.store_mouse_position)
def untrack_mouse_position(self):
"""Stop tracking the mouse position."""
self.iren.untrack_mouse_position()
@wraps(RenderWindowInteractor.track_click_position)
def track_click_position(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.track_click_position."""
self.iren.track_click_position(*args, **kwargs)
@wraps(RenderWindowInteractor.untrack_click_position)
def untrack_click_position(self, *args, **kwargs):
"""Stop tracking the click position."""
self.iren.untrack_click_position(*args, **kwargs)
@property
def pickable_actors(self):
"""Return or set the pickable actors.
When setting, this will be the list of actors to make
pickable. All actors not in the list will be made unpickable.
If ``actors`` is ``None``, all actors will be made unpickable.
Returns
-------
list of vtk.vtkActors
Examples
--------
Add two actors to a :class:`pyvista.Plotter`, make one
pickable, and then list the pickable actors.
>>> import pyvista as pv
>>> pl = pv.Plotter()
>>> sphere_actor = pl.add_mesh(pv.Sphere())
>>> cube_actor = pl.add_mesh(pv.Cube(), pickable=False, style='wireframe')
>>> len(pl.pickable_actors)
1
Set the pickable actors to both actors.
>>> pl.pickable_actors = [sphere_actor, cube_actor]
>>> len(pl.pickable_actors)
2
Set the pickable actors to ``None``.
>>> pl.pickable_actors = None
>>> len(pl.pickable_actors)
0
"""
pickable = []
for renderer in self.renderers:
for actor in renderer.actors.values():
if actor.GetPickable():
pickable.append(actor)
return pickable
@pickable_actors.setter
def pickable_actors(self, actors=None):
"""Set the pickable actors."""
actors = [] if actors is None else actors
if isinstance(actors, _vtk.vtkActor):
actors = [actors]
if not all([isinstance(actor, _vtk.vtkActor) for actor in actors]):
raise TypeError(
f'Expected a vtkActor instance or a list of vtkActors, got '
f'{[type(actor) for actor in actors]} instead.'
)
for renderer in self.renderers:
for actor in renderer.actors.values():
actor.SetPickable(actor in actors)
def _prep_for_close(self):
"""Make sure a screenshot is acquired before closing.
This doesn't actually close anything! It just preps the plotter for
closing.
"""
# Grab screenshot right before renderer closes
self.last_image = self.screenshot(True, return_img=True)
self.last_image_depth = self.get_image_depth()
def increment_point_size_and_line_width(self, increment):
"""Increment point size and line width of all actors.
For every actor in the scene, increment both its point size
and line width by the given value.
Parameters
----------
increment : float
Amount to increment point size and line width.
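Examples
--------
An illustrative sketch, thickening every point and line by two
units:
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere(), style='wireframe', line_width=1)
>>> pl.increment_point_size_and_line_width(2)
>>> pl.show()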
"""
for renderer in self.renderers:
for actor in renderer._actors.values():
if hasattr(actor, "GetProperty"):
prop = actor.GetProperty()
if hasattr(prop, "SetPointSize"):
prop.SetPointSize(prop.GetPointSize() + increment)
if hasattr(prop, "SetLineWidth"):
prop.SetLineWidth(prop.GetLineWidth() + increment)
self.render()
return
def reset_key_events(self):
"""Reset all of the key press events to their defaults."""
if hasattr(self, 'iren'):
self.iren.clear_key_event_callbacks()
self.add_key_event('q', self._prep_for_close) # Add no matter what
b_left_down_callback = lambda: self.iren.add_observer(
'LeftButtonPressEvent', self.left_button_down
)
self.add_key_event('b', b_left_down_callback)
self.add_key_event('v', lambda: self.isometric_view_interactive())
self.add_key_event('C', lambda: self.enable_cell_picking())
self.add_key_event('Up', lambda: self.camera.Zoom(1.05))
self.add_key_event('Down', lambda: self.camera.Zoom(0.95))
self.add_key_event('plus', lambda: self.increment_point_size_and_line_width(1))
self.add_key_event('minus', lambda: self.increment_point_size_and_line_width(-1))
@wraps(RenderWindowInteractor.key_press_event)
def key_press_event(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.key_press_event."""
self.iren.key_press_event(*args, **kwargs)
def left_button_down(self, obj, event_type):
"""Register the event for a left button down click."""
if hasattr(self.ren_win, 'GetOffScreenFramebuffer'):
if not self.ren_win.GetOffScreenFramebuffer().GetFBOIndex():
# must raise an error here as this causes a segfault on VTK9
raise ValueError('Invoking helper with no framebuffer')
# Get 2D click location on window
click_pos = self.iren.get_event_position()
# Get corresponding click location in the 3D plot
picker = _vtk.vtkWorldPointPicker()
picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
if np.any(np.isnan(self.pickpoint)):
self.pickpoint[:] = 0
@wraps(RenderWindowInteractor.enable_trackball_style)
def enable_trackball_style(self):
"""Wrap RenderWindowInteractor.enable_trackball_style."""
self.iren.enable_trackball_style()
@wraps(RenderWindowInteractor.enable_trackball_actor_style)
def enable_trackball_actor_style(self):
"""Wrap RenderWindowInteractor.enable_trackball_actor_style."""
self.iren.enable_trackball_actor_style()
@wraps(RenderWindowInteractor.enable_image_style)
def enable_image_style(self):
"""Wrap RenderWindowInteractor.enable_image_style."""
self.iren.enable_image_style()
@wraps(RenderWindowInteractor.enable_joystick_style)
def enable_joystick_style(self):
"""Wrap RenderWindowInteractor.enable_joystick_style."""
self.iren.enable_joystick_style()
@wraps(RenderWindowInteractor.enable_joystick_actor_style)
def enable_joystick_actor_style(self):
"""Wrap RenderWindowInteractor.enable_joystick_actor_style."""
self.iren.enable_joystick_actor_style()
@wraps(RenderWindowInteractor.enable_zoom_style)
def enable_zoom_style(self):
"""Wrap RenderWindowInteractor.enable_zoom_style."""
self.iren.enable_zoom_style()
@wraps(RenderWindowInteractor.enable_terrain_style)
def enable_terrain_style(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.enable_terrain_style."""
self.iren.enable_terrain_style(*args, **kwargs)
@wraps(RenderWindowInteractor.enable_rubber_band_style)
def enable_rubber_band_style(self):
"""Wrap RenderWindowInteractor.enable_rubber_band_style."""
self.iren.enable_rubber_band_style()
@wraps(RenderWindowInteractor.enable_rubber_band_2d_style)
def enable_rubber_band_2d_style(self):
"""Wrap RenderWindowInteractor.enable_rubber_band_2d_style."""
self.iren.enable_rubber_band_2d_style()
def enable_stereo_render(self):
"""Enable stereo rendering.
Disable this with :func:`disable_stereo_render
<BasePlotter.disable_stereo_render>`
Examples
--------
Enable stereo rendering to show a cube as an anaglyph image.
>>> import pyvista as pv
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.enable_stereo_render()
>>> pl.show()
"""
if hasattr(self, 'ren_win'):
self.ren_win.StereoRenderOn()
self.ren_win.SetStereoTypeToAnaglyph()
def disable_stereo_render(self):
"""Disable stereo rendering.
Enable again with :func:`enable_stereo_render
<BasePlotter.enable_stereo_render>`
Examples
--------
Enable and then disable stereo rendering. It should show a simple cube.
>>> import pyvista as pv
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.enable_stereo_render()
>>> pl.disable_stereo_render()
>>> pl.show()
"""
if hasattr(self, 'ren_win'):
self.ren_win.StereoRenderOff()
def hide_axes_all(self):
"""Hide the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.hide_axes()
def show_axes_all(self):
"""Show the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.show_axes()
def isometric_view_interactive(self):
"""Set the current interactive render window to isometric view."""
interactor = self.iren.get_interactor_style()
renderer = interactor.GetCurrentRenderer()
if renderer is None:
renderer = self.renderer
renderer.view_isometric()
def update(self, stime=1, force_redraw=True):
"""Update window, redraw, process messages query.
Parameters
----------
stime : int, optional
Duration of the timer that interrupts the
vtkRenderWindowInteractor, in milliseconds.
force_redraw : bool, optional
Call ``render`` immediately.
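Examples
--------
A hedged sketch of a manual update loop; it assumes the window was
opened with ``auto_close=False`` and ``interactive_update=True`` so
it stays available between calls:
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.show(auto_close=False, interactive_update=True)
>>> for _ in range(10):
...     pl.update(stime=10, force_redraw=True)
>>> pl.close()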
"""
if stime <= 0:
stime = 1
curr_time = time.time()
if Plotter.last_update_time > curr_time:
Plotter.last_update_time = curr_time
if self.iren is not None:
update_rate = self.iren.get_desired_update_rate()
if (curr_time - Plotter.last_update_time) > (1.0 / update_rate):
self.right_timer_id = self.iren.create_repeating_timer(stime)
self.render()
Plotter.last_update_time = curr_time
return
if force_redraw:
self.render()
def add_mesh(
self,
mesh,
color=None,
style=None,
scalars=None,
clim=None,
show_edges=None,
edge_color=None,
point_size=5.0,
line_width=None,
opacity=1.0,
flip_scalars=False,
lighting=None,
n_colors=256,
interpolate_before_map=True,
cmap=None,
label=None,
reset_camera=None,
scalar_bar_args=None,
show_scalar_bar=None,
multi_colors=False,
name=None,
texture=None,
render_points_as_spheres=None,
render_lines_as_tubes=False,
smooth_shading=None,
split_sharp_edges=False,
ambient=0.0,
diffuse=1.0,
specular=0.0,
specular_power=100.0,
nan_color=None,
nan_opacity=1.0,
culling=None,
rgb=None,
categories=False,
silhouette=False,
use_transparency=False,
below_color=None,
above_color=None,
annotations=None,
pickable=True,
preference="point",
log_scale=False,
pbr=False,
metallic=0.0,
roughness=0.5,
render=True,
component=None,
**kwargs,
):
"""Add any PyVista/VTK mesh or dataset that PyVista can wrap to the scene.
This method uses a mesh representation to view the surfaces
and/or geometry of datasets. For volume rendering, see
:func:`pyvista.BasePlotter.add_volume`.
Parameters
----------
mesh : pyvista.DataSet or pyvista.MultiBlock
Any PyVista or VTK mesh is supported. Also, any dataset
that :func:`pyvista.wrap` can handle including NumPy
arrays of XYZ points.
color : color_like, optional, defaults to white
Use to make the entire mesh have a single solid color.
Either a string, RGB list, or hex color string. For example:
``color='white'``, ``color='w'``, ``color=[1.0, 1.0, 1.0]``, or
``color='#FFFFFF'``. Color will be overridden if scalars are
specified.
style : str, optional
Visualization style of the mesh. One of the following:
``style='surface'``, ``style='wireframe'``, ``style='points'``.
Defaults to ``'surface'``. Note that ``'wireframe'`` only shows a
wireframe of the outer geometry.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name
of an array that is present on the mesh or an array with a
length equal to the number of cells or the number of points
in the mesh. The array should be sized as a single vector. If both
``color`` and ``scalars`` are ``None``, then the active
scalars are used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
edge_color : color_like, optional, defaults to black
The solid color to give the edges when ``show_edges=True``.
Either a string, RGB list, or hex color string.
point_size : float, optional
Point size of any nodes in the dataset plotted. Also
applicable when style='points'. Default ``5.0``.
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
opacity : float, str, array-like
Opacity of the mesh. If a single float value is given, it
will be the global opacity of the mesh and uniformly
applied everywhere - should be between 0 and 1. A string
can also be specified to map the scalars range to a
predefined opacity transfer function (options include:
``'linear'``, ``'linear_r'``, ``'geom'``, ``'geom_r'``).
A string could also be used to map a scalars array from
the mesh to the opacity (must have same number of elements
as the ``scalars`` argument). Or you can pass a custom
made transfer function that is an array either
``n_colors`` in length or shorter.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r``
suffix to do this as well.
lighting : bool, optional
Enable or disable view direction lighting. Defaults to the theme's ``lighting`` setting.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
interpolate_before_map : bool, optional
Enabling makes for a smoother scalars display. Default is
``True``. When ``False``, OpenGL will interpolate the
mapped colors, which can result in showing colors that are
not present in the color map.
cmap : str, list, optional
Name of the Matplotlib colormap to use when mapping the
``scalars``. See available Matplotlib colormaps. Only
applicable for when displaying ``scalars``. Requires
Matplotlib to be installed. ``colormap`` is also an
accepted alias for this. If ``colorcet`` or ``cmocean``
are installed, their colormaps can be specified by name.
You can also specify a list of colors to override an
existing colormap with a custom one. For example, to
create a three color colormap you might specify
``['green', 'red', 'blue']``.
label : str, optional
String label to use when adding a legend to the scene with
:func:`pyvista.BasePlotter.add_legend`.
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the
scalar bar to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If ``False``, a scalar bar will not be added to the
scene. Defaults to ``True``.
multi_colors : bool, optional
If a ``MultiBlock`` dataset is given this will color each
block by a solid color using matplotlib's color cycler.
name : str, optional
The name for the added mesh/actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
texture : vtk.vtkTexture or np.ndarray or bool, optional
A texture to apply if the input mesh has texture
coordinates. This will not work with MultiBlock
datasets. If set to ``True``, the first available texture
on the object will be used. If a string name is given, it
will pull a texture with that name associated to the input
mesh.
render_points_as_spheres : bool, optional
Render points as spheres rather than dots.
render_lines_as_tubes : bool, optional
Show lines as thick tubes rather than flat lines. Control
the width with ``line_width``.
smooth_shading : bool, optional
Enable smooth shading when ``True`` using either the
Gouraud or Phong shading algorithm. When ``False``, use
flat shading. Automatically enabled when ``pbr=True``.
See :ref:`shading_example`.
split_sharp_edges : bool, optional
Split sharp edges exceeding 30 degrees when plotting with
smooth shading. Control the angle with the optional
keyword argument ``feature_angle``. By default this is
``False``. Note that enabling this will create a copy of
the input mesh within the plotter. See
:ref:`shading_example`.
ambient : float, optional
When lighting is enabled, this is the amount of light in
the range of 0 to 1 (default 0.0) that reaches the actor
when not directed at the light source emitted from the
viewer.
diffuse : float, optional
The diffuse lighting coefficient. Default 1.0.
specular : float, optional
The specular lighting coefficient. Default 0.0.
specular_power : float, optional
The specular power. Between 0.0 and 128.0.
nan_color : color_like, optional, defaults to gray
The color to use for all ``NaN`` values in the plotted
scalar array.
nan_opacity : float, optional
Opacity of ``NaN`` values. Should be between 0 and 1.
Default 1.0.
culling : str, optional
Does not render faces that are culled. Options are
``'front'`` or ``'back'``. This can be helpful for dense
surface meshes, especially when edges are visible, but can
cause flat meshes to be partially displayed. Defaults to
``False``.
rgb : bool, optional
If a 2-dimensional array is passed as the scalars, plot
those values as RGB(A) colors. ``rgba`` is also an
accepted alias for this. Opacity (the A) is optional. If
a scalars array ending with ``"_rgba"`` is passed, the default
becomes ``True``. This can be overridden by setting this
parameter to ``False``.
categories : bool, optional
If set to ``True``, then the number of unique values in
the scalar array will be used as the ``n_colors``
argument.
silhouette : dict, bool, optional
If set to ``True``, plot a silhouette highlight for the
mesh. This feature is only available for a triangulated
``PolyData``. As a ``dict``, it contains the properties
of the silhouette to display:
* ``color``: ``color_like``, color of the silhouette
* ``line_width``: ``float``, edge width
* ``opacity``: ``float`` between 0 and 1, edge transparency
* ``feature_angle``: If a ``float``, display sharp edges
exceeding that angle in degrees.
* ``decimate``: ``float`` between 0 and 1, level of decimation
use_transparency : bool, optional
Invert the opacity mappings and make the values correspond
to transparency.
below_color : color_like, optional
Solid color for values below the scalars range
(``clim``). This will automatically set the scalar bar
``below_label`` to ``'Below'``.
above_color : color_like, optional
Solid color for values above the scalars range
(``clim``). This will automatically set the scalar bar
``above_label`` to ``'Above'``.
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float
values in the scalars range to annotate on the scalar bar
and the values are the string annotations.
pickable : bool, optional
Set whether this actor is pickable.
preference : str, optional
When ``mesh.n_points == mesh.n_cells`` and setting
scalars, this parameter sets how the scalars will be
mapped to the mesh. Default ``'point'``, which associates the
scalars with the mesh points. Can be either ``'point'`` or
``'cell'``.
log_scale : bool, optional
Use log scale when mapping data to colors. Scalars less
than zero are mapped to the smallest representable
positive float. Default ``False``.
pbr : bool, optional
Enable physics based rendering (PBR) if the mesh is
``PolyData``. Use the ``color`` argument to set the base
color. This is only available in VTK>=9.
metallic : float, optional
Usually this value is either 0 or 1 for a real material
but any value in between is valid. This parameter is only
used by PBR interpolation. Default value is 0.0.
roughness : float, optional
This value has to be between 0 (glossy) and 1 (rough). A
glossy material has reflections and a high specular
part. This parameter is only used by PBR
interpolation. Default value is 0.5.
render : bool, optional
Force a render when ``True``. Default ``True``.
component : int, optional
Set component of vector valued scalars to plot. Must be
nonnegative, if supplied. If ``None``, the magnitude of
the vector is plotted.
**kwargs : dict, optional
Optional developer keyword arguments.
Returns
-------
vtk.vtkActor
VTK actor of the mesh.
Examples
--------
Add a sphere to the plotter and show it with a custom scalar
bar title.
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere,
... scalar_bar_args={'title': 'Z Position'})
>>> plotter.show()
Plot using RGB on a single cell. Note that since the number of
points and the number of cells are identical, we have to pass
``preference='cell'``.
>>> import pyvista
>>> import numpy as np
>>> vertices = np.array([[0, 0, 0], [1, 0, 0], [.5, .667, 0], [0.5, .33, 0.667]])
>>> faces = np.hstack([[3, 0, 1, 2], [3, 0, 3, 2], [3, 0, 1, 3], [3, 1, 2, 3]])
>>> mesh = pyvista.PolyData(vertices, faces)
>>> mesh.cell_data['colors'] = [[255, 255, 255],
... [0, 255, 0],
... [0, 0, 255],
... [255, 0, 0]]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
... rgb=True, preference='cell')
>>> plotter.camera_position='xy'
>>> plotter.show()
Note how this varies from ``preference=='cell'``. This is
because each point is now being individually colored, versus
in ``preference=='cell'``, where each cell face is individually
colored.
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
... rgb=True, preference='point')
>>> plotter.camera_position='xy'
>>> plotter.show()
Plot a plane with a constant color and vary its opacity by point.
>>> plane = pyvista.Plane()
>>> plane.plot(color='b', opacity=np.linspace(0, 1, plane.n_points),
... show_edges=True)
"""
self.mapper = make_mapper(_vtk.vtkDataSetMapper)
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(mesh):
mesh = wrap(mesh)
if not is_pyvista_dataset(mesh):
raise TypeError(
f'Object type ({type(mesh)}) not supported for plotting in PyVista.'
)
##### Parse arguments to be used for all meshes #####
# Avoid mutating input
if scalar_bar_args is None:
scalar_bar_args = {'n_colors': n_colors}
else:
scalar_bar_args = scalar_bar_args.copy()
if show_edges is None:
show_edges = self._theme.show_edges
if show_scalar_bar is None:
show_scalar_bar = self._theme.show_scalar_bar
if lighting is None:
lighting = self._theme.lighting
if smooth_shading is None:
if pbr:
smooth_shading = True
else:
smooth_shading = self._theme.smooth_shading
# supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if render_points_as_spheres is None:
render_points_as_spheres = self._theme.render_points_as_spheres
if name is None:
name = f'{type(mesh).__name__}({mesh.memory_address})'
nan_color = Color(
nan_color, default_opacity=nan_opacity, default_color=self._theme.nan_color
)
if color is True:
color = self._theme.color
if texture is False:
texture = None
if culling is True:
culling = 'backface'
rgb = kwargs.pop('rgba', rgb)
feature_angle = kwargs.pop('feature_angle', 30)
# account for legacy behavior
if 'stitle' in kwargs: # pragma: no cover
warnings.warn(USE_SCALAR_BAR_ARGS, PyvistaDeprecationWarning)
scalar_bar_args.setdefault('title', kwargs.pop('stitle'))
if "scalar" in kwargs:
raise TypeError(
"`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?"
)
assert_empty_kwargs(**kwargs)
##### Handle composite datasets #####
if isinstance(mesh, pyvista.MultiBlock):
# first check the scalars
if clim is None and scalars is not None:
# Get the data range across the array for all blocks
# if scalars specified
if isinstance(scalars, str):
clim = mesh.get_data_range(scalars)
else:
# TODO: an array was given... how do we deal with
# that? Possibly a 2D arrays or list of
# arrays where first index corresponds to
# the block? This could get complicated real
# quick.
raise TypeError(
'scalars array must be given as a string name for multiblock datasets.'
)
the_arguments = locals()
the_arguments.pop('self')
the_arguments.pop('mesh')
the_arguments.pop('kwargs')
if multi_colors:
# Compute unique colors for each index of the block
if _has_matplotlib():
from itertools import cycle
import matplotlib
cycler = matplotlib.rcParams['axes.prop_cycle']
colors = cycle(cycler)
else:
multi_colors = False
logging.warning('Please install matplotlib for color cycles')
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(mesh.GetNumberOfBlocks()):
if mesh[idx] is None:
continue
# Get a good name to use
next_name = f'{name}-{idx}'
# Get the data object
if not is_pyvista_dataset(mesh[idx]):
data = wrap(mesh.GetBlock(idx))
if not is_pyvista_dataset(mesh[idx]):
continue # move on if we can't plot it
else:
data = mesh.GetBlock(idx)
if data is None or (not isinstance(data, pyvista.MultiBlock) and data.n_points < 1):
# Note that a block can exist but be None type
# or it could have zeros points (be empty) after filtering
continue
# Now check that scalars is available for this dataset
if isinstance(data, _vtk.vtkMultiBlockDataSet) or get_array(data, scalars) is None:
ts = None
else:
ts = scalars
if multi_colors:
color = next(colors)['color']
## Add to the scene
the_arguments['color'] = color
the_arguments['scalars'] = ts
the_arguments['name'] = next_name
the_arguments['texture'] = None
a = self.add_mesh(data, **the_arguments)
actors.append(a)
if (reset_camera is None and not self.camera_set) or reset_camera:
cpos = self.get_default_cam_pos()
self.camera_position = cpos
self.camera_set = False
self.reset_camera()
return actors
##### Plot a single PyVista mesh #####
if silhouette:
if isinstance(silhouette, dict):
self.add_silhouette(mesh, silhouette)
else:
self.add_silhouette(mesh)
# Try to plot something if no preference given
if scalars is None and color is None and texture is None:
# Prefer texture first
if len(list(mesh.textures.keys())) > 0:
texture = True
# If no texture, plot any active scalar
else:
# Make sure scalars components are not vectors/tuples
scalars = mesh.active_scalars_name
# Don't allow plotting of string arrays by default
if scalars is not None: # and np.issubdtype(mesh.active_scalars.dtype, np.number):
scalar_bar_args.setdefault('title', scalars)
else:
scalars = None
# Make sure scalars is a numpy array after this point
original_scalar_name = None
if isinstance(scalars, str):
self.mapper.SetArrayName(scalars)
# enable rgb if the scalars name ends with rgb or rgba
if rgb is None:
if scalars.endswith('_rgb') or scalars.endswith('_rgba'):
rgb = True
original_scalar_name = scalars
scalars = get_array(mesh, scalars, preference=preference, err=True)
scalar_bar_args.setdefault('title', original_scalar_name)
# Compute surface normals if using smooth shading
if smooth_shading:
mesh, scalars = prepare_smooth_shading(
mesh, scalars, texture, split_sharp_edges, feature_angle, preference
)
if mesh.n_points < 1:
raise ValueError('Empty meshes cannot be plotted. Input mesh has zero points.')
# set main values
self.mesh = mesh
self.mapper.SetInputData(self.mesh)
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
actor = _vtk.vtkActor()
prop = _vtk.vtkProperty()
actor.SetMapper(self.mapper)
actor.SetProperty(prop)
if texture is True or isinstance(texture, (str, int)):
texture = mesh._activate_texture(texture)
if texture:
if isinstance(texture, np.ndarray):
texture = numpy_to_texture(texture)
if not isinstance(texture, (_vtk.vtkTexture, _vtk.vtkOpenGLTexture)):
raise TypeError(f'Invalid texture type ({type(texture)})')
if mesh.GetPointData().GetTCoords() is None:
raise ValueError(
'Input mesh does not have texture coordinates to support the texture.'
)
actor.SetTexture(texture)
# Set color to white by default when using a texture
if color is None:
color = 'white'
if scalars is None:
show_scalar_bar = False
self.mapper.SetScalarModeToUsePointFieldData()
# see https://github.com/pyvista/pyvista/issues/950
mesh.set_active_scalars(None)
# Handle making opacity array
custom_opac, opacity = process_opacity(
mesh, opacity, preference, n_colors, scalars, use_transparency
)
# Scalars formatting ==================================================
if scalars is not None:
show_scalar_bar, n_colors, clim = self.mapper.set_scalars(
mesh,
scalars,
scalar_bar_args,
rgb,
component,
preference,
interpolate_before_map,
custom_opac,
annotations,
log_scale,
nan_color,
above_color,
below_color,
cmap,
flip_scalars,
opacity,
categories,
n_colors,
clim,
self._theme,
show_scalar_bar,
)
elif custom_opac: # no scalars but custom opacity
self.mapper.set_custom_opacity(
opacity,
color,
mesh,
n_colors,
preference,
interpolate_before_map,
rgb,
self._theme,
)
else:
self.mapper.SetScalarModeToUseFieldData()
# Set actor properties ================================================
# select view style
if not style:
style = 'surface'
style = style.lower()
if style == 'wireframe':
prop.SetRepresentationToWireframe()
if color is None:
color = self._theme.outline_color
elif style == 'points':
prop.SetRepresentationToPoints()
elif style == 'surface':
prop.SetRepresentationToSurface()
else:
raise ValueError(
'Invalid style. Must be one of the following:\n'
'\t"surface"\n'
'\t"wireframe"\n'
'\t"points"\n'
)
prop.SetPointSize(point_size)
prop.SetAmbient(ambient)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
if pbr:
if not _vtk.VTK9: # pragma: no cover
raise RuntimeError('Physically based rendering requires VTK 9 or newer')
prop.SetInterpolationToPBR()
prop.SetMetallic(metallic)
prop.SetRoughness(roughness)
elif smooth_shading:
prop.SetInterpolationToPhong()
else:
prop.SetInterpolationToFlat()
# edge display style
if show_edges:
prop.EdgeVisibilityOn()
rgb_color = Color(color, default_color=self._theme.color)
prop.SetColor(rgb_color.float_rgb)
if isinstance(opacity, (float, int)):
prop.SetOpacity(opacity)
prop.SetEdgeColor(Color(edge_color, default_color=self._theme.edge_color).float_rgb)
if render_points_as_spheres:
prop.SetRenderPointsAsSpheres(render_points_as_spheres)
if render_lines_as_tubes:
prop.SetRenderLinesAsTubes(render_lines_as_tubes)
# legend label
if label:
if not isinstance(label, str):
raise TypeError('Label must be a string')
geom = pyvista.Triangle()
if scalars is not None:
geom = pyvista.Box()
rgb_color = Color('black')
geom.points -= geom.center
addr = actor.GetAddressAsString("")
self.renderer._labels[addr] = [geom, label, rgb_color]
# lighting display style
if not lighting:
prop.LightingOff()
# set line thickness
if line_width:
prop.SetLineWidth(line_width)
self.add_actor(
actor,
reset_camera=reset_camera,
name=name,
culling=culling,
pickable=pickable,
render=render,
)
# hide scalar bar if using special scalars
if scalar_bar_args.get('title') == '__custom_rgba':
show_scalar_bar = False
# Only show scalar bar if there are scalars
if show_scalar_bar and scalars is not None:
self.add_scalar_bar(**scalar_bar_args)
self.renderer.Modified()
return actor
def add_volume(
self,
volume,
scalars=None,
clim=None,
resolution=None,
opacity='linear',
n_colors=256,
cmap=None,
flip_scalars=False,
reset_camera=None,
name=None,
ambient=0.0,
categories=False,
culling=False,
multi_colors=False,
blending='composite',
mapper=None,
scalar_bar_args=None,
show_scalar_bar=None,
annotations=None,
pickable=True,
preference="point",
opacity_unit_distance=None,
shade=False,
diffuse=0.7,
specular=0.2,
specular_power=10.0,
render=True,
**kwargs,
):
"""Add a volume, rendered using a smart mapper by default.
Requires a 3D :class:`numpy.ndarray` or :class:`pyvista.UniformGrid`.
Parameters
----------
volume : 3D numpy.ndarray or pyvista.UniformGrid
The input volume to visualize. 3D numpy arrays are accepted.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array with a length
equal to the number of cells or the number of points in the
mesh. The array should be sized as a single vector. If ``scalars`` is
``None``, then the active scalars are used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
resolution : list, optional
Block resolution.
opacity : str or numpy.ndarray, optional
Opacity mapping for the scalars array.
A string can also be specified to map the scalars range to a
predefined opacity transfer function (options include: 'linear',
'linear_r', 'geom', 'geom_r'). Or you can pass a custom made
transfer function that is an array either ``n_colors`` in length or
shorter.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
cmap : str, optional
Name of the Matplotlib colormap to use when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene.
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults ``False``.
multi_colors : bool, optional
Whether or not to use multiple colors when plotting MultiBlock
object. Blocks will be colored sequentially through the
'Reds', 'Greens', 'Blues', 'Greys', 'Oranges', and 'Purples'
colormaps.
blending : str, optional
Blending mode for visualisation of the input object(s). Can be
one of 'additive', 'maximum', 'minimum', 'composite', or
'average'. Defaults to ``'composite'``.
mapper : str, optional
Volume mapper to use given by name. Options include:
``'fixed_point'``, ``'gpu'``, ``'open_gl'``, and
``'smart'``. If ``None`` the ``"volume_mapper"`` in the
``self._theme`` is used.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the
scalar bar to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If ``False``, a scalar bar will not be added to the
scene. Defaults to ``True``.
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float
values in the scalars range to annotate on the scalar bar
and the values are the string annotations.
pickable : bool, optional
Set whether this mesh is pickable.
preference : str, optional
When ``mesh.n_points == mesh.n_cells`` and setting
scalars, this parameter sets how the scalars will be
mapped to the mesh. Default ``'point'``, which associates the
scalars with the mesh points. Can be either ``'point'`` or
``'cell'``.
opacity_unit_distance : float
Set/Get the unit distance on which the scalar opacity
transfer function is defined. Meaning that over that
distance, a given opacity (from the transfer function) is
accumulated. This is adjusted for the actual sampling
distance during rendering. By default, this is the length
of the diagonal of the bounding box of the volume divided
by the dimensions.
shade : bool
Default off. If shading is turned on, the mapper may
perform shading calculations - in some cases shading does
not apply (for example, in a maximum intensity projection)
and therefore shading will not be performed even if this
flag is on.
diffuse : float, optional
The diffuse lighting coefficient. Default ``0.7``.
specular : float, optional
The specular lighting coefficient. Default ``0.2``.
specular_power : float, optional
The specular power. Between ``0.0`` and ``128.0``. Default ``10.0``.
render : bool, optional
Force a render when True. Default ``True``.
**kwargs : dict, optional
Optional keyword arguments.
Returns
-------
vtk.vtkActor
VTK actor of the volume.
Examples
--------
Show a built-in volume example with the coolwarm colormap.
>>> from pyvista import examples
>>> import pyvista as pv
>>> bolt_nut = examples.download_bolt_nut()
>>> pl = pv.Plotter()
>>> _ = pl.add_volume(bolt_nut, cmap="coolwarm")
>>> pl.show()
"""
# Handle default arguments
# Supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
# Avoid mutating input
if scalar_bar_args is None:
    scalar_bar_args = {}
else:
    scalar_bar_args = scalar_bar_args.copy()
# account for legacy behavior before rejecting leftover kwargs
if 'stitle' in kwargs:  # pragma: no cover
    warnings.warn(USE_SCALAR_BAR_ARGS, PyvistaDeprecationWarning)
    scalar_bar_args.setdefault('title', kwargs.pop('stitle'))
if "scalar" in kwargs:
    raise TypeError(
        "`scalar` is an invalid keyword argument for `add_volume`. Perhaps you mean `scalars` with an s?"
    )
assert_empty_kwargs(**kwargs)
if show_scalar_bar is None:
show_scalar_bar = self._theme.show_scalar_bar
if culling is True:
culling = 'backface'
if mapper is None:
mapper = self._theme.volume_mapper
# only render when the plotter has already been shown
if render is None:
render = not self._first_time
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(volume):
if isinstance(volume, np.ndarray):
volume = wrap(volume)
if resolution is None:
resolution = [1, 1, 1]
elif len(resolution) != 3:
raise ValueError('Invalid resolution dimensions.')
volume.spacing = resolution
else:
volume = wrap(volume)
if not is_pyvista_dataset(volume):
raise TypeError(
f'Object type ({type(volume)}) not supported for plotting in PyVista.'
)
else:
# HACK: Make a copy so the original object is not altered.
# Also, place all data on the nodes as issues arise when
# volume rendering on the cells.
volume = volume.cell_data_to_point_data()
if name is None:
name = f'{type(volume).__name__}({volume.memory_address})'
if isinstance(volume, pyvista.MultiBlock):
from itertools import cycle
cycler = cycle(['Reds', 'Greens', 'Blues', 'Greys', 'Oranges', 'Purples'])
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(volume.GetNumberOfBlocks()):
if volume[idx] is None:
continue
# Get a good name to use
next_name = f'{name}-{idx}'
# Get the data object
block = wrap(volume.GetBlock(idx))
if resolution is None:
try:
block_resolution = block.GetSpacing()
except AttributeError:
block_resolution = resolution
else:
block_resolution = resolution
if multi_colors:
color = next(cycler)
else:
color = cmap
a = self.add_volume(
block,
resolution=block_resolution,
opacity=opacity,
n_colors=n_colors,
cmap=color,
flip_scalars=flip_scalars,
reset_camera=reset_camera,
name=next_name,
ambient=ambient,
categories=categories,
culling=culling,
clim=clim,
mapper=mapper,
pickable=pickable,
opacity_unit_distance=opacity_unit_distance,
shade=shade,
diffuse=diffuse,
specular=specular,
specular_power=specular_power,
render=render,
)
actors.append(a)
return actors
if not isinstance(volume, pyvista.UniformGrid):
raise TypeError(
f'Type {type(volume)} not supported for volume rendering at this time. Use `pyvista.UniformGrid`.'
)
if opacity_unit_distance is None:
opacity_unit_distance = volume.length / (np.mean(volume.dimensions) - 1)
if scalars is None:
# Make sure scalars components are not vectors/tuples
scalars = volume.active_scalars
# Don't allow plotting of string arrays by default
if scalars is not None and np.issubdtype(scalars.dtype, np.number):
scalar_bar_args.setdefault('title', volume.active_scalars_info[1])
else:
raise ValueError('No scalars to use for volume rendering.')
elif isinstance(scalars, str):
pass
##############
title = 'Data'
if isinstance(scalars, str):
title = scalars
scalars = get_array(volume, scalars, preference=preference, err=True)
scalar_bar_args.setdefault('title', title)
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
if not np.issubdtype(scalars.dtype, np.number):
raise TypeError('Non-numeric scalars are currently not supported for volume rendering.')
if scalars.ndim != 1:
scalars = scalars.ravel()
if scalars.dtype == np.bool_ or scalars.dtype == np.uint8:
scalars = scalars.astype(np.float_)
# Define mapper, volume, and add the correct properties
mappers = {
'fixed_point': _vtk.vtkFixedPointVolumeRayCastMapper,
'gpu': _vtk.vtkGPUVolumeRayCastMapper,
'open_gl': _vtk.vtkOpenGLGPUVolumeRayCastMapper,
'smart': _vtk.vtkSmartVolumeMapper,
}
if not isinstance(mapper, str) or mapper not in mappers.keys():
raise TypeError(
f"Mapper ({mapper}) unknown. Available volume mappers include: {', '.join(mappers.keys())}"
)
self.mapper = make_mapper(mappers[mapper])
# Scalars interpolation approach
if scalars.shape[0] == volume.n_points:
volume.point_data.set_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == volume.n_cells:
volume.cell_data.set_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, volume)
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, (float, int)):
clim = [-clim, clim]
###############
scalars = scalars.astype(np.float_)
with np.errstate(invalid='ignore'):
idxs0 = scalars < clim[0]
idxs1 = scalars > clim[1]
scalars[idxs0] = clim[0]
scalars[idxs1] = clim[1]
scalars = ((scalars - np.nanmin(scalars)) / (np.nanmax(scalars) - np.nanmin(scalars))) * 255
# scalars = scalars.astype(np.uint8)
volume[title] = scalars
self.mapper.scalar_range = clim
# Set colormap and build lookup table
table = _vtk.vtkLookupTable()
# table.SetNanColor(nan_color) # NaN's are chopped out with current implementation
# above/below colors not supported with volume rendering
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
if cmap is None: # Set default map if matplotlib is available
if _has_matplotlib():
cmap = self._theme.cmap
if cmap is not None:
if not _has_matplotlib():
raise ImportError('Please install matplotlib for volume rendering.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
if flip_scalars:
cmap = cmap.reversed()
color_tf = _vtk.vtkColorTransferFunction()
for ii in range(n_colors):
color_tf.AddRGBPoint(ii, *cmap(ii)[:-1])
# Set opacities
if isinstance(opacity, (float, int)):
opacity_values = [opacity] * n_colors
elif isinstance(opacity, str):
opacity_values = pyvista.opacity_transfer_function(opacity, n_colors)
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
opacity_values = opacity_transfer_function(opacity, n_colors)
opacity_tf = _vtk.vtkPiecewiseFunction()
for ii in range(n_colors):
opacity_tf.AddPoint(ii, opacity_values[ii] / n_colors)
# Now put color tf and opacity tf into a lookup table for the scalar bar
table.SetNumberOfTableValues(n_colors)
lut = cmap(np.array(range(n_colors))) * 255
lut[:, 3] = opacity_values
lut = lut.astype(np.uint8)
table.SetTable(_vtk.numpy_to_vtk(lut))
table.SetRange(*clim)
self.mapper.lookup_table = table
self.mapper.SetInputData(volume)
blending = blending.lower()
if blending in ['additive', 'add', 'sum']:
self.mapper.SetBlendModeToAdditive()
elif blending in ['average', 'avg', 'average_intensity']:
self.mapper.SetBlendModeToAverageIntensity()
elif blending in ['composite', 'comp']:
self.mapper.SetBlendModeToComposite()
elif blending in ['maximum', 'max', 'maximum_intensity']:
self.mapper.SetBlendModeToMaximumIntensity()
elif blending in ['minimum', 'min', 'minimum_intensity']:
self.mapper.SetBlendModeToMinimumIntensity()
else:
raise ValueError(
    f'Blending mode {blending!r} invalid. '
    'Please choose one of "additive", "composite", '
    '"average", "minimum", or "maximum".'
)
self.mapper.Update()
self.volume = _vtk.vtkVolume()
self.volume.SetMapper(self.mapper)
prop = _vtk.vtkVolumeProperty()
prop.SetColor(color_tf)
prop.SetScalarOpacity(opacity_tf)
prop.SetAmbient(ambient)
prop.SetScalarOpacityUnitDistance(opacity_unit_distance)
prop.SetShade(shade)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
self.volume.SetProperty(prop)
actor, prop = self.add_actor(
self.volume,
reset_camera=reset_camera,
name=name,
culling=culling,
pickable=pickable,
render=render,
)
# Add scalar bar if scalars are available
if show_scalar_bar and scalars is not None:
self.add_scalar_bar(**scalar_bar_args)
self.renderer.Modified()
return actor
def add_silhouette(self, mesh, params=None):
"""Add a silhouette of a PyVista or VTK dataset to the scene.
A silhouette can also be generated directly in
:func:`add_mesh <pyvista.Plotter.add_mesh>`. See also
:ref:`silhouette_example`.
Parameters
----------
mesh : pyvista.PolyData
Mesh for generating silhouette to plot.
params : dict, optional
* If not supplied, the default theme values will be used.
* ``color``: ``color_like``, color of the silhouette
* ``line_width``: ``float``, edge width
* ``opacity``: ``float`` between 0 and 1, edge transparency
* ``feature_angle``: If a ``float``, display sharp edges
exceeding that angle in degrees.
* ``decimate``: ``float`` between 0 and 1, level of decimation
Returns
-------
vtk.vtkActor
VTK actor of the silhouette.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> bunny = examples.download_bunny()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(bunny, color='tan')
>>> _ = plotter.add_silhouette(bunny,
... params={'color': 'red', 'line_width': 8.0})
>>> plotter.view_xy()
>>> plotter.show()
"""
silhouette_params = self._theme.silhouette.to_dict()
if params:
silhouette_params.update(params)
if not is_pyvista_dataset(mesh):
mesh = wrap(mesh)
if not isinstance(mesh, pyvista.PolyData):
raise TypeError(f"Expected type is `PolyData` but {type(mesh)} was given.")
if isinstance(silhouette_params["decimate"], float):
silhouette_mesh = mesh.decimate(silhouette_params["decimate"])
else:
silhouette_mesh = mesh
alg = _vtk.vtkPolyDataSilhouette()
alg.SetInputData(silhouette_mesh)
alg.SetCamera(self.renderer.camera)
if silhouette_params["feature_angle"] is not None:
alg.SetEnableFeatureAngle(True)
alg.SetFeatureAngle(silhouette_params["feature_angle"])
else:
alg.SetEnableFeatureAngle(False)
mapper = make_mapper(_vtk.vtkDataSetMapper)
mapper.SetInputConnection(alg.GetOutputPort())
actor, prop = self.add_actor(mapper)
prop.SetColor(Color(silhouette_params["color"]).float_rgb)
prop.SetOpacity(silhouette_params["opacity"])
prop.SetLineWidth(silhouette_params["line_width"])
return actor
def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
clim : sequence
The new range of scalar bar. Two item list (e.g. ``[-1, 2]``).
name : str, optional
The title of the scalar bar to update.
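Examples
--------
A minimal sketch; the array name and range are illustrative:
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['height'] = sphere.points[:, 2]
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(sphere, scalars='height')
>>> pl.update_scalar_bar_range([-0.25, 0.25])
>>> pl.show()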
"""
if isinstance(clim, (float, int)):
clim = [-clim, clim]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
if name is None:
if not hasattr(self, 'mapper'):
raise AttributeError('This plotter does not have an active mapper.')
self.mapper.scalar_range = clim
return
# Use the name to find the desired actor
def update_mapper(mapper_helper):
mapper_helper.scalar_range = clim
return
try:
for mh in self._scalar_bar_mappers[name]:
update_mapper(mh)
except KeyError:
raise KeyError(f'Name ({name}) not valid/not found in this plotter.')
return
def clear(self):
"""Clear plot by removing all actors and properties.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.clear()
>>> plotter.renderer.actors
{}
"""
self.renderers.clear()
self.scalar_bars.clear()
self.mesh = None
def link_views(self, views=0):
"""Link the views' cameras.
Parameters
----------
views : int, tuple, or list
If ``views`` is an int, link all views to the camera of the
view with that index. If ``views`` is a tuple or a list,
link the given views' cameras to the camera of the first
view in the sequence.
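Examples
--------
A small sketch with two subplots sharing the first view's camera;
the meshes and layout are illustrative:
>>> import pyvista
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> pl.subplot(0, 0)
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.subplot(0, 1)
>>> _ = pl.add_mesh(pyvista.Cube())
>>> pl.link_views()
>>> pl.show()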
"""
if isinstance(views, (int, np.integer)):
for renderer in self.renderers:
renderer.camera = self.renderers[views].camera
return
views = np.asarray(views)
if np.issubdtype(views.dtype, np.integer):
for view_index in views:
self.renderers[view_index].camera = self.renderers[views[0]].camera
else:
raise TypeError(f'Expected type is int, list or tuple: {type(views)} was given')
def unlink_views(self, views=None):
"""Unlink the views' cameras.
Parameters
----------
views : None, int, tuple or list
If ``views`` is ``None``, unlink all the views; if ``views``
is an int, unlink the selected view's camera; if ``views``
is a tuple or a list, unlink the given views' cameras.
"""
if views is None:
for renderer in self.renderers:
renderer.camera = Camera()
renderer.reset_camera()
elif isinstance(views, int):
self.renderers[views].camera = Camera()
self.renderers[views].reset_camera()
elif isinstance(views, collections.abc.Iterable):
for view_index in views:
self.renderers[view_index].camera = Camera()
self.renderers[view_index].reset_camera()
else:
raise TypeError(f'Expected type is None, int, list or tuple: {type(views)} was given')
@wraps(ScalarBars.add_scalar_bar)
def add_scalar_bar(self, *args, **kwargs):
"""Wrap for ``ScalarBars.add_scalar_bar``."""
# only render when the plotter has already been shown
render = kwargs.get('render', None)
if render is None:
kwargs['render'] = not self._first_time
# check if mapper exists
mapper = kwargs.get('mapper', None)
if mapper is None:
if not hasattr(self, 'mapper') or self.mapper is None:
raise AttributeError('Mapper does not exist. Add a mesh with scalars first.')
kwargs['mapper'] = self.mapper
# title can be the first and only arg
if len(args):
title = args[0]
else:
title = kwargs.get('title', '')
if title is None:
title = ''
kwargs['title'] = title
interactive = kwargs.get('interactive', None)
if interactive is None:
interactive = self._theme.interactive
if self.shape != (1, 1):
interactive = False
elif interactive and self.shape != (1, 1):
raise ValueError('Interactive scalar bars disabled for multi-renderer plots')
# by default, use the plotter local theme
kwargs.setdefault('theme', self._theme)
return self.scalar_bars.add_scalar_bar(**kwargs)
def update_scalars(self, scalars, mesh=None, render=True):
"""Update scalars of an object in the plotter.
Parameters
----------
scalars : np.ndarray
Scalars to replace existing scalars.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Force a render when True. Default ``True``.
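Examples
--------
A minimal sketch, assuming the mesh already carries a point array
(the array name ``'data'`` is illustrative) that is replaced in place.
>>> import numpy as np
>>> import pyvista
>>> mesh = pyvista.Sphere()
>>> mesh['data'] = np.zeros(mesh.n_points)
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(mesh)
>>> pl.update_scalars(np.ones(mesh.n_points), mesh=mesh)
>>> pl.show()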
"""
if mesh is None:
mesh = self.mesh
if isinstance(mesh, (collections.abc.Iterable, pyvista.MultiBlock)):
# Recursive if need to update scalars on many meshes
for m in mesh:
self.update_scalars(scalars, mesh=m, render=False)
if render:
self.render()
return
if isinstance(scalars, str):
# Grab scalars array if name given
scalars = get_array(mesh, scalars)
if scalars is None:
if render:
self.render()
return
if scalars.shape[0] == mesh.GetNumberOfPoints():
data = mesh.GetPointData()
elif scalars.shape[0] == mesh.GetNumberOfCells():
data = mesh.GetCellData()
else:
raise_not_matching(scalars, mesh)
vtk_scalars = data.GetScalars()
if vtk_scalars is None:
raise ValueError('No active scalars')
s = convert_array(vtk_scalars)
s[:] = scalars
data.Modified()
try:
# Why are the points updated here? Not all datasets have points
# and only the scalars array is modified by this function...
mesh.GetPoints().Modified()
except:
pass
if render:
self.render()
def update_coordinates(self, points, mesh=None, render=True):
"""Update the points of an object in the plotter.
Parameters
----------
points : np.ndarray
Points to replace existing points.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Force a render when True. Default ``True``.
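Examples
--------
A minimal sketch: scale the points of a mesh that is already in the
scene.
>>> import pyvista
>>> mesh = pyvista.Sphere()
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(mesh)
>>> pl.update_coordinates(mesh.points * 2.0, mesh=mesh)
>>> pl.show()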
"""
if mesh is None:
mesh = self.mesh
mesh.points = points
# only render when the plotter has already been shown
if render is None:
render = not self._first_time
if render:
self.render()
def _clear_ren_win(self):
"""Clear the render window."""
if hasattr(self, 'ren_win'):
self.ren_win.Finalize()
del self.ren_win
def close(self, render=False):
"""Close the render window.
Parameters
----------
render : bool
Unused argument.
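Examples
--------
A minimal sketch: close the render window without ever showing it.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.close()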
"""
# optionally run just prior to exiting the plotter
if self._before_close_callback is not None:
self._before_close_callback(self)
self._before_close_callback = None
# must close out widgets first
super().close()
# Renderer has an axes widget, so close it
self.renderers.close()
self.renderers.remove_all_lights()
# Grab screenshots of last render
if self._store_image:
self.last_image = self.screenshot(None, return_img=True)
self.last_image_depth = self.get_image_depth()
# reset scalar bars
self.clear()
# grab the display id before clearing the window
# this is an experimental feature
if KILL_DISPLAY: # pragma: no cover
disp_id = None
if hasattr(self, 'ren_win'):
disp_id = self.ren_win.GetGenericDisplayId()
self._clear_ren_win()
if self.iren is not None:
self.iren.remove_observers()
self.iren.terminate_app()
if KILL_DISPLAY: # pragma: no cover
_kill_display(disp_id)
self.iren = None
if hasattr(self, 'textActor'):
del self.textActor
# end movie
if hasattr(self, 'mwriter'):
try:
self.mwriter.close()
except BaseException:
pass
# this helps managing closed plotters
self._closed = True
def deep_clean(self):
"""Clean the plotter of the memory."""
if hasattr(self, 'renderers'):
self.renderers.deep_clean()
if getattr(self, 'mesh', None) is not None:
self.mesh.point_data = None
self.mesh.cell_data = None
self.mesh = None
if getattr(self, 'mapper', None) is not None:
self.mapper.lookup_table = None
self.mapper = None
self.volume = None
self.textActor = None
def add_text(
self,
text,
position='upper_left',
font_size=18,
color=None,
font=None,
shadow=False,
name=None,
viewport=False,
*,
render=True,
):
"""Add text to plot object in the top left corner by default.
Parameters
----------
text : str
The text to add the rendering.
position : str, tuple(float), optional
Position to place the bottom left corner of the text box.
If tuple is used, the position of the text uses the pixel
coordinate system (default). In this case,
it returns a more general `vtkOpenGLTextActor`.
If string name is used, it returns a `vtkCornerAnnotation`
object normally used for fixed labels (like title or xlabel).
Default is to find the top left corner of the rendering window
and place text box up there. Available position: ``'lower_left'``,
``'lower_right'``, ``'upper_left'``, ``'upper_right'``,
``'lower_edge'``, ``'upper_edge'``, ``'right_edge'``, and
``'left_edge'``.
font_size : float, optional
Sets the size of the title font. Defaults to 18.
color : color_like, optional
Either a string, RGB list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1.0, 1.0, 1.0]``
* ``color='#FFFFFF'``
Defaults to :attr:`pyvista.global_theme.font.color <pyvista.themes._Font.color>`.
font : str, optional
Font name may be ``'courier'``, ``'times'``, or ``'arial'``.
shadow : bool, optional
Adds a black shadow to the text. Defaults to ``False``.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
viewport : bool, optional
If ``True`` and position is a tuple of float, uses the
normalized viewport coordinate system (values between 0.0
and 1.0 and support for HiDPI).
render : bool, optional
Force a render when ``True`` (default).
Returns
-------
vtk.vtkTextActor
Text actor added to plot.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> actor = pl.add_text('Sample Text', position='upper_right', color='blue',
... shadow=True, font_size=26)
>>> pl.show()
"""
if font is None:
font = self._theme.font.family
if font_size is None:
font_size = self._theme.font.size
if position is None:
# Set the position of the text to the top left corner
window_size = self.window_size
x = (window_size[0] * 0.02) / self.shape[0]
y = (window_size[1] * 0.85) / self.shape[0]
position = [x, y]
corner_mappings = {
'lower_left': _vtk.vtkCornerAnnotation.LowerLeft,
'lower_right': _vtk.vtkCornerAnnotation.LowerRight,
'upper_left': _vtk.vtkCornerAnnotation.UpperLeft,
'upper_right': _vtk.vtkCornerAnnotation.UpperRight,
'lower_edge': _vtk.vtkCornerAnnotation.LowerEdge,
'upper_edge': _vtk.vtkCornerAnnotation.UpperEdge,
'left_edge': _vtk.vtkCornerAnnotation.LeftEdge,
'right_edge': _vtk.vtkCornerAnnotation.RightEdge,
}
corner_mappings['ll'] = corner_mappings['lower_left']
corner_mappings['lr'] = corner_mappings['lower_right']
corner_mappings['ul'] = corner_mappings['upper_left']
corner_mappings['ur'] = corner_mappings['upper_right']
corner_mappings['top'] = corner_mappings['upper_edge']
corner_mappings['bottom'] = corner_mappings['lower_edge']
corner_mappings['right'] = corner_mappings['right_edge']
corner_mappings['r'] = corner_mappings['right_edge']
corner_mappings['left'] = corner_mappings['left_edge']
corner_mappings['l'] = corner_mappings['left_edge']
if isinstance(position, (int, str, bool)):
if isinstance(position, str):
position = corner_mappings[position]
elif position is True:
position = corner_mappings['upper_left']
self.textActor = _vtk.vtkCornerAnnotation()
# This is how you set the font size with this actor
self.textActor.SetLinearFontScaleFactor(font_size // 2)
self.textActor.SetText(position, text)
else:
self.textActor = _vtk.vtkTextActor()
self.textActor.SetInput(text)
self.textActor.SetPosition(position)
if viewport:
self.textActor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetActualPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetTextProperty().SetFontSize(int(font_size * 2))
self.textActor.GetTextProperty().SetColor(
Color(color, default_color=self._theme.font.color).float_rgb
)
self.textActor.GetTextProperty().SetFontFamily(FONTS[font].value)
self.textActor.GetTextProperty().SetShadow(shadow)
self.add_actor(self.textActor, reset_camera=False, name=name, pickable=False, render=render)
return self.textActor
def open_movie(self, filename, framerate=24, quality=5, **kwargs):
"""Establish a connection to the ffmpeg writer.
Parameters
----------
filename : str
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See ``imageio.get_writer``.
framerate : int, optional
Frames per second.
quality : int, optional
Quality 10 is the top possible quality for any codec. The
range is ``0 - 10``. Higher quality leads to a larger file.
**kwargs : dict, optional
See the documentation for ``imageio.get_writer`` for additional kwargs.
Notes
-----
See the documentation for `imageio.get_writer
<https://imageio.readthedocs.io/en/stable/userapi.html#imageio.get_writer>`_
Examples
--------
Open a MP4 movie and set the quality to maximum.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.open_movie('movie.mp4', quality=10) # doctest:+SKIP
"""
from imageio import get_writer
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self.mwriter = get_writer(filename, fps=framerate, quality=quality, **kwargs)
def open_gif(self, filename):
"""Open a gif file.
Parameters
----------
filename : str
Filename of the gif to open. Filename must end in ``"gif"``.
Examples
--------
Open a gif file.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.open_gif('movie.gif') # doctest:+SKIP
"""
from imageio import get_writer
if filename[-3:] != 'gif':
raise ValueError('Unsupported filetype. Must end in .gif')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self._gif_filename = os.path.abspath(filename)
self.mwriter = get_writer(filename, mode='I')
def write_frame(self):
"""Write a single frame to the movie file.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> plotter.open_movie(filename) # doctest:+SKIP
>>> plotter.add_mesh(pyvista.Sphere()) # doctest:+SKIP
>>> plotter.write_frame() # doctest:+SKIP
See :ref:`movie_example` for a full example using this method.
"""
# if off screen, show has not been called and we must render
# before extracting an image
if self._first_time:
self._on_first_render_request()
self.render()
if not hasattr(self, 'mwriter'):
raise RuntimeError('This plotter has not opened a movie or GIF file.')
self.update()
self.mwriter.append_data(self.image)
def _run_image_filter(self, ifilter):
# Update filter and grab pixels
ifilter.Modified()
ifilter.Update()
image = pyvista.wrap(ifilter.GetOutput())
img_size = image.dimensions
img_array = pyvista.utilities.point_array(image, 'ImageScalars')
# Reshape and write
tgt_size = (img_size[1], img_size[0], -1)
return img_array.reshape(tgt_size)[::-1]
def get_image_depth(self, fill_value=np.nan, reset_camera_clipping_range=True):
"""Return a depth image representing current render window.
Parameters
----------
fill_value : float, optional
Fill value for points in image that do not include objects
in scene. To not use a fill value, pass ``None``.
reset_camera_clipping_range : bool, optional
Reset the camera clipping range to include data in view.
Returns
-------
numpy.ndarray
Image of depth values from camera orthogonal to image
plane.
Notes
-----
Values in image_depth are negative to adhere to a
right-handed coordinate system.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.store_image = True
>>> plotter.show()
>>> zval = plotter.get_image_depth()
"""
# allow no render window
if not hasattr(self, 'ren_win') and self.last_image_depth is not None:
zval = self.last_image_depth.copy()
if fill_value is not None:
zval[self._image_depth_null] = fill_value
return zval
self._check_rendered()
self._check_has_ren_win()
# Ensure points in view are within clipping range of renderer?
if reset_camera_clipping_range:
self.renderer.ResetCameraClippingRange()
# Get the z-buffer image
ifilter = _vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
ifilter.SetInputBufferTypeToZBuffer()
zbuff = self._run_image_filter(ifilter)[:, :, 0]
# Convert z-buffer values to depth from camera
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
near, far = self.camera.clipping_range
if self.camera.parallel_projection:
zval = (zbuff - near) / (far - near)
else:
zval = 2 * near * far / ((zbuff - 0.5) * 2 * (far - near) - near - far)
# Consider image values outside clipping range as nans
self._image_depth_null = np.logical_or(zval < -far, np.isclose(zval, -far))
if fill_value is not None:
zval[self._image_depth_null] = fill_value
return zval
def add_lines(self, lines, color='w', width=5, label=None, name=None):
"""Add lines to the plotting object.
Parameters
----------
lines : np.ndarray
Points representing line segments. For example, two line
segments would be represented as ``np.array([[0, 0, 0],
[1, 0, 0], [1, 0, 0], [1, 1, 0]])``.
color : color_like, optional
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1.0, 1.0, 1.0]``
* ``color='#FFFFFF'``
width : float, optional
Thickness of lines.
label : str, optional
String label to use when adding a legend to the scene with
:func:`pyvista.BasePlotter.add_legend`.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Returns
-------
vtk.vtkActor
Lines actor.
Examples
--------
>>> import numpy as np
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> points = np.array([[0, 1, 0], [1, 0, 0], [1, 1, 0], [2, 0, 0]])
>>> actor = pl.add_lines(points, color='yellow', width=3)
>>> pl.camera_position = 'xy'
>>> pl.show()
"""
if not isinstance(lines, np.ndarray):
raise TypeError('Input should be an array of point segments')
lines = pyvista.lines_from_points(lines)
# Create mapper and add lines
mapper = _vtk.vtkDataSetMapper()
mapper.SetInputData(lines)
rgb_color = Color(color)
# Create actor
actor = _vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(width)
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetEdgeColor(rgb_color.float_rgb)
actor.GetProperty().SetColor(rgb_color.float_rgb)
actor.GetProperty().LightingOff()
# legend label
if label:
if not isinstance(label, str):
raise TypeError('Label must be a string')
addr = actor.GetAddressAsString("")
self.renderer._labels[addr] = [lines, label, rgb_color]
# Add to renderer
self.add_actor(actor, reset_camera=False, name=name, pickable=False)
return actor
@wraps(ScalarBars.remove_scalar_bar)
def remove_scalar_bar(self, *args, **kwargs):
"""Remove the active scalar bar."""
self.scalar_bars.remove_scalar_bar(*args, **kwargs)
def add_point_labels(
self,
points,
labels,
italic=False,
bold=True,
font_size=None,
text_color=None,
font_family=None,
shadow=False,
show_points=True,
point_color=None,
point_size=5,
name=None,
shape_color='grey',
shape='rounded_rect',
fill_shape=True,
margin=3,
shape_opacity=1.0,
pickable=False,
render_points_as_spheres=False,
tolerance=0.001,
reset_camera=None,
always_visible=False,
render=True,
):
"""Create a point actor with one label from list labels assigned to each point.
Parameters
----------
points : sequence or pyvista.DataSet
An ``n x 3`` sequence of points or a pyvista dataset with points.
labels : list or str
List of labels. Must be the same length as points. If a
string name is given with a :class:`pyvista.DataSet` input for
points, then these are fetched.
italic : bool, optional
Italicises title and bar labels. Default ``False``.
bold : bool, optional
Bolds title and bar labels. Default ``True``.
font_size : float, optional
Sets the size of the title font. Defaults to 16.
text_color : color_like, optional
Color of text. Either a string, RGB sequence, or hex color string.
* ``text_color='white'``
* ``text_color='w'``
* ``text_color=[1.0, 1.0, 1.0]``
* ``text_color='#FFFFFF'``
font_family : str, optional
Font family. Must be either ``'courier'``, ``'times'``,
or ``'arial'``.
shadow : bool, optional
Adds a black shadow to the text. Defaults to ``False``.
show_points : bool, optional
Controls if points are visible. Default ``True``.
point_color : color_like, optional
Either a string, rgb list, or hex color string. One of
the following.
* ``point_color='white'``
* ``point_color='w'``
* ``point_color=[1.0, 1.0, 1.0]``
* ``point_color='#FFFFFF'``
point_size : float, optional
Size of points if visible.
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
shape_color : color_like, optional
Color of points (if visible). Either a string, rgb
sequence, or hex color string.
shape : str, optional
The string name of the shape to use. Options are ``'rect'`` or
``'rounded_rect'``. If you want no shape, pass ``None``.
fill_shape : bool, optional
Fill the shape with the ``shape_color``. Outlines if ``False``.
margin : int, optional
The size of the margin on the label background shape. Default is 3.
shape_opacity : float, optional
The opacity of the shape in the range of ``[0, 1]``.
pickable : bool, optional
Set whether this actor is pickable.
render_points_as_spheres : bool, optional
Render points as spheres rather than dots.
tolerance : float, optional
A tolerance to use to determine whether a point label is
visible. A tolerance is usually required because the
conversion from world space to display space during
rendering introduces numerical round-off.
reset_camera : bool, optional
Reset the camera after adding the points to the scene.
always_visible : bool, optional
Skip adding the visibility filter. Default False.
render : bool, optional
Force a render when ``True`` (default).
Returns
-------
vtk.vtkActor2D
VTK label actor. Can be used to change properties of the labels.
Examples
--------
>>> import numpy as np
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> points = np.array([[0.0, 0.0, 0.0],
... [1.0, 1.0, 0.0],
... [2.0, 0.0, 0.0]])
>>> labels = ['Point A', 'Point B', 'Point C']
>>> actor = pl.add_point_labels(points, labels, italic=True, font_size=20,
... point_color='red', point_size=20,
... render_points_as_spheres=True,
... always_visible=True, shadow=True)
>>> pl.camera_position = 'xy'
>>> pl.show()
"""
if font_family is None:
font_family = self._theme.font.family
if font_size is None:
font_size = self._theme.font.size
point_color = Color(point_color, default_color=self._theme.color)
if isinstance(points, (list, tuple)):
points = np.array(points)
if isinstance(points, np.ndarray):
vtkpoints = pyvista.PolyData(points) # Cast to poly data
elif is_pyvista_dataset(points):
vtkpoints = pyvista.PolyData(points.points)
if isinstance(labels, str):
labels = points.point_data[labels]
else:
raise TypeError(f'Points type not usable: {type(points)}')
if len(vtkpoints.points) != len(labels):
raise ValueError('There must be one label for each point')
if name is None:
name = f'{type(vtkpoints).__name__}({vtkpoints.memory_address})'
vtklabels = _vtk.vtkStringArray()
vtklabels.SetName('labels')
for item in labels:
vtklabels.InsertNextValue(str(item))
vtkpoints.GetPointData().AddArray(vtklabels)
# Create hierarchy
hier = _vtk.vtkPointSetToLabelHierarchy()
hier.SetLabelArrayName('labels')
if always_visible:
hier.SetInputData(vtkpoints)
else:
# Only show visible points
vis_points = _vtk.vtkSelectVisiblePoints()
vis_points.SetInputData(vtkpoints)
vis_points.SetRenderer(self.renderer)
vis_points.SetTolerance(tolerance)
hier.SetInputConnection(vis_points.GetOutputPort())
# create label mapper
labelMapper = _vtk.vtkLabelPlacementMapper()
labelMapper.SetInputConnection(hier.GetOutputPort())
if not isinstance(shape, str):
labelMapper.SetShapeToNone()
elif shape.lower() in 'rect':
labelMapper.SetShapeToRect()
elif shape.lower() in 'rounded_rect':
labelMapper.SetShapeToRoundedRect()
else:
raise ValueError(f'Shape ({shape}) not understood')
if fill_shape:
labelMapper.SetStyleToFilled()
else:
labelMapper.SetStyleToOutline()
labelMapper.SetBackgroundColor(Color(shape_color).float_rgb)
labelMapper.SetBackgroundOpacity(shape_opacity)
labelMapper.SetMargin(margin)
textprop = hier.GetTextProperty()
textprop.SetItalic(italic)
textprop.SetBold(bold)
textprop.SetFontSize(font_size)
textprop.SetFontFamily(parse_font_family(font_family))
textprop.SetColor(Color(text_color, default_color=self._theme.font.color).float_rgb)
textprop.SetShadow(shadow)
self.remove_actor(f'{name}-points', reset_camera=False)
self.remove_actor(f'{name}-labels', reset_camera=False)
# add points
if show_points:
self.add_mesh(
vtkpoints,
color=point_color,
point_size=point_size,
name=f'{name}-points',
pickable=pickable,
render_points_as_spheres=render_points_as_spheres,
reset_camera=reset_camera,
render=render,
)
label_actor = _vtk.vtkActor2D()
label_actor.SetMapper(labelMapper)
self.add_actor(label_actor, reset_camera=False, name=f'{name}-labels', pickable=False)
return label_actor
def add_point_scalar_labels(self, points, labels, fmt=None, preamble='', **kwargs):
"""Label the points from a dataset with the values of their scalars.
Wrapper for :func:`pyvista.BasePlotter.add_point_labels`.
Parameters
----------
points : numpy.ndarray or pyvista.DataSet
An ``n x 3`` numpy.ndarray or pyvista dataset with points.
labels : str, optional
String name of the point data array to use.
fmt : str, optional
String formatter used to format numerical data.
preamble : str, optional
Text before the start of each label.
**kwargs : dict, optional
Keyword arguments passed to
:func:`pyvista.BasePlotter.add_point_labels`.
Returns
-------
vtk.vtkActor2D
VTK label actor. Can be used to change properties of the labels.
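Examples
--------
A minimal sketch, assuming a small point cloud with a scalar array
(the array name ``'values'`` is illustrative).
>>> import numpy as np
>>> import pyvista
>>> points = pyvista.PolyData(np.random.random((5, 3)))
>>> points.point_data['values'] = np.arange(5)
>>> pl = pyvista.Plotter()
>>> actor = pl.add_point_scalar_labels(points, 'values', fmt='%.1f')
>>> pl.show()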
"""
if not is_pyvista_dataset(points):
raise TypeError(f'input points must be a pyvista dataset, not: {type(points)}')
if not isinstance(labels, str):
raise TypeError('labels must be a string name of the scalars array to use')
if fmt is None:
fmt = self._theme.font.fmt
if fmt is None:
fmt = '%.6e'
scalars = points.point_data[labels]
phrase = f'{preamble} {fmt}'
labels = [phrase % val for val in scalars]
return self.add_point_labels(points, labels, **kwargs)
def add_points(self, points, **kwargs):
"""Add points to a mesh.
Parameters
----------
points : numpy.ndarray or pyvista.DataSet
Array of points or the points from a pyvista object.
**kwargs : dict, optional
See :func:`pyvista.BasePlotter.add_mesh` for optional
keyword arguments.
Returns
-------
vtk.vtkActor
Actor of the mesh.
Examples
--------
Add a numpy array of points to a mesh.
>>> import numpy as np
>>> import pyvista
>>> points = np.random.random((10, 3))
>>> pl = pyvista.Plotter()
>>> actor = pl.add_points(points, render_points_as_spheres=True,
... point_size=100.0)
>>> pl.show()
"""
kwargs['style'] = 'points'
return self.add_mesh(points, **kwargs)
def add_arrows(self, cent, direction, mag=1, **kwargs):
"""Add arrows to the plotter.
Parameters
----------
cent : np.ndarray
Array of centers.
direction : np.ndarray
Array of direction vectors.
mag : float, optional
Amount to scale the direction vectors.
**kwargs : dict, optional
See :func:`pyvista.BasePlotter.add_mesh` for optional
keyword arguments.
Returns
-------
vtk.vtkActor
VTK actor of the arrows.
Examples
--------
Plot a random field of vectors and save a screenshot of it.
>>> import numpy as np
>>> import pyvista
>>> cent = np.random.random((10, 3))
>>> direction = np.random.random((10, 3))
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_arrows(cent, direction, mag=2)
>>> plotter.show()
"""
if cent.shape != direction.shape: # pragma: no cover
raise ValueError('center and direction arrays must have the same shape')
direction = direction.copy()
if cent.ndim != 2:
cent = cent.reshape((-1, 3))
if direction.ndim != 2:
direction = direction.reshape((-1, 3))
if mag != 1:
direction = direction * mag
pdata = pyvista.vector_poly_data(cent, direction)
# Create arrow object
arrow = _vtk.vtkArrowSource()
arrow.Update()
glyph3D = _vtk.vtkGlyph3D()
glyph3D.SetSourceData(arrow.GetOutput())
glyph3D.SetInputData(pdata)
glyph3D.SetVectorModeToUseVector()
glyph3D.Update()
arrows = wrap(glyph3D.GetOutput())
return self.add_mesh(arrows, **kwargs)
@staticmethod
def _save_image(image, filename, return_img):
"""Save to file and/or return a NumPy image array.
This is an internal helper.
"""
if not image.size:
raise ValueError('Empty image. Have you run plot() first?')
# write screenshot to file if requested
if isinstance(filename, (str, pathlib.Path, io.BytesIO)):
from PIL import Image
if isinstance(filename, (str, pathlib.Path)):
filename = pathlib.Path(filename)
if isinstance(pyvista.FIGURE_PATH, str) and not filename.is_absolute():
filename = pathlib.Path(os.path.join(pyvista.FIGURE_PATH, filename))
if not filename.suffix:
filename = filename.with_suffix('.png')
elif filename.suffix not in SUPPORTED_FORMATS:
raise ValueError(
f'Unsupported extension {filename.suffix}\n'
f'Must be one of the following: {SUPPORTED_FORMATS}'
)
filename = os.path.abspath(os.path.expanduser(str(filename)))
Image.fromarray(image).save(filename)
else:
Image.fromarray(image).save(filename, format="PNG")
# return image array if requested
if return_img:
return image
def save_graphic(self, filename, title='PyVista Export', raster=True, painter=True):
"""Save a screenshot of the rendering window as a graphic file.
This can be helpful for publication documents.
The supported formats are:
* ``'.svg'``
* ``'.eps'``
* ``'.ps'``
* ``'.pdf'``
* ``'.tex'``
Parameters
----------
filename : str
Path to save the graphic file to.
title : str, optional
Title to use within the file properties.
raster : bool, optional
Attempt to write 3D properties as a raster image.
painter : bool, optional
Configure the exporter to expect a painter-ordered 2D
rendering, that is, a rendering at a fixed depth where
primitives are drawn from the bottom up.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(examples.load_airplane(), smooth_shading=True)
>>> _ = pl.add_background_image(examples.mapfile)
>>> pl.save_graphic("img.svg") # doctest:+SKIP
"""
if not hasattr(self, 'ren_win'):
raise AttributeError('This plotter is closed and unable to save a screenshot.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
filename = os.path.abspath(os.path.expanduser(filename))
extension = pyvista.fileio.get_ext(filename)
writer = _vtk.lazy_vtkGL2PSExporter()
modes = {
'.svg': writer.SetFileFormatToSVG,
'.eps': writer.SetFileFormatToEPS,
'.ps': writer.SetFileFormatToPS,
'.pdf': writer.SetFileFormatToPDF,
'.tex': writer.SetFileFormatToTeX,
}
if extension not in modes:
raise ValueError(
f"Extension ({extension}) is an invalid choice.\n\n"
f"Valid options include: {', '.join(modes.keys())}"
)
writer.CompressOff()
writer.SetFilePrefix(filename.replace(extension, ''))
writer.SetInput(self.ren_win)
modes[extension]()
writer.SetTitle(title)
writer.SetWrite3DPropsAsRasterImage(raster)
if painter:
writer.UsePainterSettings()
writer.Update()
def screenshot(
self, filename=None, transparent_background=None, return_img=True, window_size=None
):
"""Take screenshot at current camera position.
Parameters
----------
filename : str, pathlib.Path, BytesIO, optional
Location to write image to. If ``None``, no image is written.
transparent_background : bool, optional
Whether to make the background transparent. The default is
looked up on the plotter's theme.
return_img : bool, optional
If ``True`` (the default), a NumPy array of the image will
be returned.
window_size : 2-length tuple, optional
Set the plotter's size to this ``(width, height)`` before
taking the screenshot.
Returns
-------
numpy.ndarray
Array containing pixel RGB and alpha. Sized:
* [Window height x Window width x 3] if
``transparent_background`` is set to ``False``.
* [Window height x Window width x 4] if
``transparent_background`` is set to ``True``.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter(off_screen=True)
>>> actor = plotter.add_mesh(sphere)
>>> plotter.screenshot('screenshot.png') # doctest:+SKIP
"""
if window_size is not None:
self.window_size = window_size
# configure image filter
if transparent_background is None:
transparent_background = self._theme.transparent_background
self.image_transparent_background = transparent_background
# This if statement allows you to save screenshots of closed plotters
# This is needed for the sphinx-gallery to work
if not hasattr(self, 'ren_win'):
# If plotter has been closed...
# check if last_image exists
if self.last_image is not None:
# Save last image
return self._save_image(self.last_image, filename, return_img)
# Plotter hasn't been rendered or was improperly closed
raise RuntimeError('This plotter is closed and unable to save a screenshot.')
if self._first_time and not self.off_screen:
raise RuntimeError(
"Nothing to screenshot - call .show first or use the off_screen argument"
)
# if off screen, show has not been called and we must render
# before extracting an image
if self._first_time:
self._on_first_render_request()
self.render()
return self._save_image(self.image, filename, return_img)
@wraps(Renderers.set_background)
def set_background(self, *args, **kwargs):
"""Wrap ``Renderers.set_background``."""
self.renderers.set_background(*args, **kwargs)
def generate_orbital_path(self, factor=3.0, n_points=20, viewup=None, shift=0.0):
"""Generate an orbital path around the data scene.
Parameters
----------
factor : float, optional
A scaling factor when building the orbital extent.
n_points : int, optional
Number of points on the orbital path.
viewup : list(float), optional
The normal to the orbital plane.
shift : float, optional
Shift the plane up/down from the center of the scene by
this amount.
Returns
-------
pyvista.PolyData
PolyData containing the orbital path.
Examples
--------
Generate an orbital path around a sphere.
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> viewup = [0, 0, 1]
>>> orbit = plotter.generate_orbital_path(factor=2.0, n_points=50,
... shift=0.0, viewup=viewup)
See :ref:`orbiting_example` for a full example using this method.
"""
if viewup is None:
viewup = self._theme.camera['viewup']
center = np.array(self.center)
bnds = np.array(self.bounds)
radius = (bnds[1] - bnds[0]) * factor
y = (bnds[3] - bnds[2]) * factor
if y > radius:
radius = y
center += np.array(viewup) * shift
return pyvista.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
def fly_to(self, point):
"""Move the current camera's focal point to a position point.
The movement is animated over the number of frames specified in
NumberOfFlyFrames. The LOD desired frame rate is used.
Parameters
----------
point : sequence
Point to fly to in the form of ``(x, y, z)``.
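Examples
--------
A minimal sketch: fly the camera's focal point toward the top of a
sphere. This requires an active interactor, so the calls are skipped
in doctests.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.fly_to((0.0, 0.0, 0.5))  # doctest:+SKIP
>>> pl.show()  # doctest:+SKIP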
"""
self.iren.fly_to(self.renderer, point)
def orbit_on_path(
self,
path=None,
focus=None,
step=0.5,
viewup=None,
write_frames=False,
threaded=False,
progress_bar=False,
):
"""Orbit on the given path focusing on the focus point.
Parameters
----------
path : pyvista.PolyData
Path of orbital points. The order in the points is the order of
travel.
focus : list(float) of length 3, optional
The point of focus for the camera.
step : float, optional
The timestep between flying to each camera position.
viewup : list(float), optional
The normal to the orbital plane.
write_frames : bool, optional
Assume a file is open and write a frame on each camera
view during the orbit.
threaded : bool, optional
Run this as a background thread. Generally used within a
GUI (i.e. PyQt).
progress_bar : bool, optional
Show the progress bar when proceeding through the path.
This can be helpful to show progress when generating
movies with ``off_screen=True``.
Examples
--------
Plot an orbit around the earth. Save the gif as a temporary file.
>>> import tempfile
>>> import os
>>> import pyvista
>>> filename = os.path.join(tempfile._get_default_tempdir(),
... next(tempfile._get_candidate_names()) + '.gif')
>>> from pyvista import examples
>>> plotter = pyvista.Plotter(window_size=[300, 300])
>>> _ = plotter.add_mesh(examples.load_globe(), smooth_shading=True)
>>> plotter.open_gif(filename)
>>> viewup = [0, 0, 1]
>>> orbit = plotter.generate_orbital_path(factor=2.0, n_points=24,
... shift=0.0, viewup=viewup)
>>> plotter.orbit_on_path(orbit, write_frames=True, viewup=viewup,
... step=0.02)
See :ref:`orbiting_example` for a full example using this method.
"""
if focus is None:
focus = self.center
if viewup is None:
viewup = self._theme.camera['viewup']
if path is None:
path = self.generate_orbital_path(viewup=viewup)
if not is_pyvista_dataset(path):
path = pyvista.PolyData(path)
points = path.points
# Make sure the whole scene is visible
self.camera.thickness = path.length
if progress_bar:
try:
from tqdm import tqdm
except ImportError: # pragma: no cover
raise ImportError("Please install `tqdm` to use ``progress_bar=True``")
def orbit():
"""Define the internal thread for running the orbit."""
if progress_bar:
points_seq = tqdm(points)
else:
points_seq = points
for point in points_seq:
tstart = time.time() # include the render time in the step time
self.set_position(point, render=False)
self.set_focus(focus, render=False)
self.set_viewup(viewup, render=False)
self.renderer.ResetCameraClippingRange()
if write_frames:
self.write_frame()
else:
self.render()
sleep_time = step - (time.time() - tstart)
if sleep_time > 0:
time.sleep(sleep_time)
if write_frames:
self.mwriter.close()
if threaded:
thread = Thread(target=orbit)
thread.start()
else:
orbit()
def export_vtkjs(self, filename, compress_arrays=False):
"""Export the current rendering scene as a VTKjs scene.
It can be used for rendering in a web browser.
Parameters
----------
filename : str
Filename to export the scene to. A filename extension of
``'.vtkjs'`` will be added.
compress_arrays : bool, optional
Enable array compression.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(examples.load_hexbeam())
>>> pl.export_vtkjs("sample") # doctest:+SKIP
"""
if not hasattr(self, 'ren_win'):
raise RuntimeError('Export must be called before showing/closing the scene.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays)
def export_obj(self, filename):
"""Export scene to OBJ format.
Parameters
----------
filename : str
Filename to export the scene to. Should end in ``'.obj'``.
Returns
-------
vtkOBJExporter
Object exporter.
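Examples
--------
A minimal sketch; the filename ``'scene.obj'`` is illustrative.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.export_obj('scene.obj')  # doctest:+SKIP
>>> pl.close()  # doctest:+SKIP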
"""
# lazy import vtkOBJExporter here as it takes a long time to
# load and is not always used
try:
from vtkmodules.vtkIOExport import vtkOBJExporter
except: # noqa: E722
from vtk import vtkOBJExporter
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter must still have a render window open.")
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
exporter = vtkOBJExporter()
exporter.SetFilePrefix(filename)
exporter.SetRenderWindow(self.ren_win)
exporter.Write()
return exporter
def __del__(self):
"""Delete the plotter."""
# We have to check here if it has the closed attribute as it
# may not exist should the plotter have failed to initialize.
if hasattr(self, '_closed'):
if not self._closed:
self.close()
self.deep_clean()
if hasattr(self, 'renderers'):
del self.renderers
def add_background_image(self, image_path, scale=1, auto_resize=True, as_global=True):
"""Add a background image to a plot.
Parameters
----------
image_path : str
Path to an image file.
scale : float, optional
Scale the image larger or smaller relative to the size of
the window. For example, a scale size of 2 will make the
largest dimension of the image twice as large as the
largest dimension of the render window. Defaults to 1.
auto_resize : bool, optional
Resize the background when the render window changes size.
as_global : bool, optional
When multiple render windows are present, setting
``as_global=False`` will cause the background to only
appear in one window.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.add_background_image(examples.mapfile)
>>> plotter.show()
"""
if self.renderers.has_active_background_renderer:
raise RuntimeError(
'A background image already exists. '
'Remove it with ``remove_background_image`` '
'before adding one'
)
# Need to change the number of layers to support an additional
# background layer
if not self._has_background_layer:
self.ren_win.SetNumberOfLayers(3)
renderer = self.renderers.add_background_renderer(image_path, scale, as_global)
self.ren_win.AddRenderer(renderer)
# set up autoscaling of the image
if auto_resize: # pragma: no cover
self.iren.add_observer('ModifiedEvent', renderer.resize)
@wraps(Renderers.remove_background_image)
def remove_background_image(self):
"""Wrap ``Renderers.remove_background_image``."""
self.renderers.remove_background_image()
# return the active renderer to the top, otherwise flat background
# will not be rendered
self.renderer.layer = 0
def _on_first_render_request(self, cpos=None):
"""Once an image or render is officially requested, run this routine.
For example on the show call or any screenshot producing code.
"""
# reset camera for the first render unless camera is set
if self._first_time: # and not self.camera_set:
for renderer in self.renderers:
if not renderer.camera_set and cpos is None:
renderer.camera_position = renderer.get_default_cam_pos()
renderer.ResetCamera()
elif cpos is not None:
renderer.camera_position = cpos
self._first_time = False
def reset_camera_clipping_range(self):
"""Reset camera clipping planes."""
self.renderer.ResetCameraClippingRange()
def add_light(self, light, only_active=False):
"""Add a Light to the scene.
Parameters
----------
light : Light or vtkLight
The light to be added.
only_active : bool, optional
If ``True``, only add the light to the active
renderer. The default is that every renderer adds the
light. To add the light to an arbitrary renderer, see
:func:`pyvista.plotting.renderer.Renderer.add_light`.
Examples
--------
Create a plotter that we initialize with no lights, and add a
cube and a single headlight to it.
>>> import pyvista as pv
>>> plotter = pv.Plotter(lighting='none')
>>> _ = plotter.add_mesh(pv.Cube())
>>> light = pv.Light(color='cyan', light_type='headlight')
>>> plotter.add_light(light)
>>> plotter.show()
"""
renderers = [self.renderer] if only_active else self.renderers
for renderer in renderers:
renderer.add_light(light)
def remove_all_lights(self, only_active=False):
"""Remove all lights from the scene.
Parameters
----------
only_active : bool
If ``True``, only remove lights from the active
renderer. The default is that lights are stripped from
every renderer.
Examples
--------
Create a plotter and remove all lights after initialization.
Note how the rendered mesh is completely flat.
>>> import pyvista as pv
>>> plotter = pv.Plotter()
>>> plotter.remove_all_lights()
>>> plotter.renderer.lights
[]
>>> _ = plotter.add_mesh(pv.Sphere(), show_edges=True)
>>> plotter.show()
Note how this differs from a plot with default lighting
>>> pv.Sphere().plot(show_edges=True, lighting=True)
"""
renderers = [self.renderer] if only_active else self.renderers
for renderer in renderers:
renderer.remove_all_lights()
def where_is(self, name):
"""Return the subplot coordinates of a given actor.
Parameters
----------
name : str
Actor's name.
Returns
-------
list(tuple(int))
A list with the subplot coordinates of the actor.
Examples
--------
>>> import pyvista as pv
>>> plotter = pv.Plotter(shape=(2, 2))
>>> plotter.subplot(0, 0)
>>> _ = plotter.add_mesh(pv.Box(), name='box')
>>> plotter.subplot(0, 1)
>>> _ = plotter.add_mesh(pv.Sphere(), name='sphere')
>>> plotter.subplot(1, 0)
>>> _ = plotter.add_mesh(pv.Box(), name='box')
>>> plotter.subplot(1, 1)
>>> _ = plotter.add_mesh(pv.Cone(), name='cone')
>>> plotter.where_is('box')
[(0, 0), (1, 0)]
>>> plotter.show()
"""
places = []
for index in range(len(self.renderers)):
if name in self.renderers[index]._actors:
places.append(tuple(self.renderers.index_to_loc(index)))
return places
class Plotter(BasePlotter):
"""Plotting object to display vtk meshes or numpy arrays.
Parameters
----------
off_screen : bool, optional
Renders off screen when ``True``. Useful for automated
screenshots.
notebook : bool, optional
When ``True``, the resulting plot is placed inline in a jupyter
notebook. Assumes a jupyter console is active. Automatically
enables ``off_screen``.
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two subplots with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one render
window. Can also accept a string descriptor as shape. E.g.:
* ``shape="3|1"`` means 3 plots on the left and 1 on the right,
* ``shape="4/2"`` means 4 plots on top and 2 at the bottom.
border : bool, optional
Draw a border around each render window. Default ``False``.
border_color : color_like, optional
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1.0, 1.0, 1.0]``
* ``color='#FFFFFF'``
window_size : list, optional
Window size in pixels. Defaults to ``[1024, 768]``, unless
set differently in the relevant theme's ``window_size``
property.
multi_samples : int, optional
The number of multi-samples used to mitigate aliasing. 4 is a
good default but 8 will have better results with a potential
impact on performance.
line_smoothing : bool, optional
If ``True``, enable line smoothing.
polygon_smoothing : bool, optional
If ``True``, enable polygon smoothing.
lighting : str, optional
What lighting to set up for the plotter.
Accepted options:
* ``'light kit'``: a vtk Light Kit composed of 5 lights.
* ``'three lights'``: illumination using 3 lights.
* ``'none'``: no light sources at instantiation.
The default is a ``'light kit'`` (to be precise, 5 separate
lights that act like a Light Kit).
theme : pyvista.themes.DefaultTheme, optional
Plot-specific theme.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> another_mesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(mesh, color='red')
>>> actor = plotter.add_mesh(another_mesh, color='blue')
>>> plotter.show()
"""
last_update_time = 0.0
right_timer_id = -1
def __init__(
self,
off_screen=None,
notebook=None,
shape=(1, 1),
groups=None,
row_weights=None,
col_weights=None,
border=None,
border_color='k',
border_width=2.0,
window_size=None,
multi_samples=None,
line_smoothing=False,
point_smoothing=False,
polygon_smoothing=False,
splitting_position=None,
title=None,
lighting='light kit',
theme=None,
):
"""Initialize a vtk plotting object."""
super().__init__(
shape=shape,
border=border,
border_color=border_color,
border_width=border_width,
groups=groups,
row_weights=row_weights,
col_weights=col_weights,
splitting_position=splitting_position,
title=title,
lighting=lighting,
theme=theme,
)
log.debug('Plotter init start')
# check if a plotting backend is enabled
_warn_xserver()
def on_timer(iren, event_id):
"""Exit application if interactive renderer stops."""
if event_id == 'TimerEvent' and self.iren._style != "Context":
self.iren.terminate_app()
if off_screen is None:
off_screen = pyvista.OFF_SCREEN
if notebook is None:
if self._theme.notebook is not None:
notebook = self._theme.notebook
else:
notebook = scooby.in_ipykernel()
self.notebook = notebook
if self.notebook:
off_screen = True
self.off_screen = off_screen
self._window_size_unset = False
if window_size is None:
self._window_size_unset = True
window_size = self._theme.window_size
self.__prior_window_size = window_size
if multi_samples is None:
multi_samples = self._theme.multi_samples
# initialize render window
self.ren_win = _vtk.vtkRenderWindow()
self.ren_win.SetMultiSamples(multi_samples)
self.ren_win.SetBorders(True)
if line_smoothing:
self.ren_win.LineSmoothingOn()
if point_smoothing:
self.ren_win.PointSmoothingOn()
if polygon_smoothing:
self.ren_win.PolygonSmoothingOn()
for renderer in self.renderers:
self.ren_win.AddRenderer(renderer)
# Add the shadow renderer to allow us to capture interactions within
# a given viewport
# https://vtk.org/pipermail/vtkusers/2018-June/102030.html
number_of_layers = self.ren_win.GetNumberOfLayers()
current_layer = self.renderer.GetLayer()
self.ren_win.SetNumberOfLayers(number_of_layers + 1)
self.ren_win.AddRenderer(self.renderers.shadow_renderer)
self.renderers.shadow_renderer.SetLayer(current_layer + 1)
self.renderers.shadow_renderer.SetInteractive(False) # never needs to capture
if self.off_screen:
self.ren_win.SetOffScreenRendering(1)
# vtkGenericRenderWindowInteractor has no event loop and
# allows the display client to close on Linux when
# off_screen. We still want an interactor for off screen
# plotting since there are some widgets (like the axes
# widget) that need an interactor
interactor = _vtk.vtkGenericRenderWindowInteractor()
else:
interactor = None
# Add ren win and interactor
self.iren = RenderWindowInteractor(self, light_follow_camera=False, interactor=interactor)
self.iren.set_render_window(self.ren_win)
self.enable_trackball_style() # internally calls update_style()
self.iren.add_observer("KeyPressEvent", self.key_press_event)
# Set camera widget based on theme. This requires that an
# interactor be present.
if self.theme._enable_camera_orientation_widget:
self.add_camera_orientation_widget()
# Set background
self.set_background(self._theme.background)
# Set window size
self.window_size = window_size
# add timer event if interactive render exists
self.iren.add_observer(_vtk.vtkCommand.TimerEvent, on_timer)
if self._theme.depth_peeling.enabled:
if self.enable_depth_peeling():
for renderer in self.renderers:
renderer.enable_depth_peeling()
log.debug('Plotter init stop')
def show(
self,
title=None,
window_size=None,
interactive=True,
auto_close=None,
interactive_update=False,
full_screen=None,
screenshot=False,
return_img=False,
cpos=None,
use_ipyvtk=None,
jupyter_backend=None,
return_viewer=False,
return_cpos=None,
**kwargs,
):
"""Display the plotting window.
Parameters
----------
title : str, optional
Title of plotting window. Defaults to
:attr:`pyvista.global_theme.title <pyvista.themes.DefaultTheme.title>`.
window_size : list, optional
Window size in pixels. Defaults to
:attr:`pyvista.global_theme.window_size <pyvista.themes.DefaultTheme.window_size>`.
interactive : bool, optional
Enabled by default. Allows user to pan and move figure.
Defaults to
:attr:`pyvista.global_theme.interactive <pyvista.themes.DefaultTheme.interactive>`.
auto_close : bool, optional
Exits plotting session when user closes the window when
interactive is ``True``. Defaults to
:attr:`pyvista.global_theme.auto_close <pyvista.themes.DefaultTheme.auto_close>`.
interactive_update : bool, optional
Disabled by default. Allows the user to draw without blocking;
the user should call :func:`BasePlotter.update` in each iteration.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores
``window_size``. Defaults to
:attr:`pyvista.global_theme.full_screen <pyvista.themes.DefaultTheme.full_screen>`.
screenshot : str, pathlib.Path, BytesIO or bool, optional
Take a screenshot of the initial state of the plot.
If a string, it specifies the path to which the screenshot
is saved. If ``True``, the screenshot is returned as an
array. Defaults to ``False``. For interactive screenshots
it's recommended to first call ``show()`` with
``auto_close=False`` to set the scene, then save the
screenshot in a separate call to ``show()`` or
:func:`Plotter.screenshot`.
return_img : bool
Returns a numpy array representing the last image along
with the camera position.
cpos : list(tuple(floats))
The camera position. You can also set this with
:attr:`Plotter.camera_position`.
use_ipyvtk : bool, optional
Deprecated. Instead, set the backend either globally with
``pyvista.set_jupyter_backend('ipyvtklink')`` or with
``backend='ipyvtklink'``.
jupyter_backend : str, optional
Jupyter notebook plotting backend to use. One of the
following:
* ``'none'`` : Do not display in the notebook.
* ``'pythreejs'`` : Show a ``pythreejs`` widget
* ``'static'`` : Display a static figure.
* ``'ipygany'`` : Show a ``ipygany`` widget
* ``'panel'`` : Show a ``panel`` widget.
This can also be set globally with
:func:`pyvista.set_jupyter_backend`.
return_viewer : bool, optional
Return the jupyterlab viewer, scene, or display object
when plotting with jupyter notebook.
return_cpos : bool, optional
Return the last camera position from the render window
when enabled. Default based on theme setting. See
:attr:`pyvista.themes.DefaultTheme.return_cpos`.
**kwargs : dict, optional
Developer keyword arguments.
Returns
-------
cpos : list
List of camera position, focal point, and view up.
Returned only when ``return_cpos=True`` or set in the
default global or plot theme. Not returned when in a
jupyter notebook and ``return_viewer=True``.
image : np.ndarray
Numpy array of the last image when either ``return_img=True``
or ``screenshot=True`` is set. Not returned when in a
jupyter notebook with ``return_viewer=True``. Optionally
contains alpha values. Sized:
* [Window height x Window width x 3] if the theme sets
``transparent_background=False``.
* [Window height x Window width x 4] if the theme sets
``transparent_background=True``.
widget
IPython widget when ``return_viewer=True``.
Notes
-----
Please use the ``q``-key to close the plotter as some
operating systems (namely Windows) will experience issues
saving a screenshot if the exit button in the GUI is pressed.
Examples
--------
Simply show the plot of a mesh.
>>> import pyvista as pv
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.show()
Take a screenshot interactively. Screenshot will be of the
first image shown, so use the first call with
``auto_close=False`` to set the scene before taking the
screenshot.
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.show(auto_close=False) # doctest:+SKIP
>>> pl.show(screenshot='my_image.png') # doctest:+SKIP
Display a ``pythreejs`` scene within a jupyter notebook
>>> pl.show(jupyter_backend='pythreejs') # doctest:+SKIP
Return a ``pythreejs`` scene.
>>> pl.show(jupyter_backend='pythreejs', return_viewer=True) # doctest:+SKIP
Obtain the camera position when using ``show``.
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Sphere())
>>> pl.show(return_cpos=True) # doctest:+SKIP
[(2.223005211686484, -0.3126909484828709, 2.4686209867735065),
(0.0, 0.0, 0.0),
(-0.6839951597283509, -0.47207319712073137, 0.5561452310578585)]
"""
# developer keyword argument: runs a function immediately prior to ``close``
self._before_close_callback = kwargs.pop('before_close_callback', None)
jupyter_kwargs = kwargs.pop('jupyter_kwargs', {})
assert_empty_kwargs(**kwargs)
if interactive_update and auto_close is None:
auto_close = False
elif interactive_update and auto_close:
warnings.warn(
textwrap.dedent(
"""
The plotter will close immediately automatically since ``auto_close=True``.
Either do not specify ``auto_close``, or set it to ``False`` if you want to
interact with the plotter interactively.
"""
).strip()
)
elif auto_close is None:
auto_close = self._theme.auto_close
if use_ipyvtk:
txt = textwrap.dedent(
"""
use_ipyvtk is deprecated. Set the backend
globally with ``pyvista.set_jupyter_backend("ipyvtklink")
or with ``backend="ipyvtklink"``
"""
).strip()
from pyvista.core.errors import DeprecationError
raise DeprecationError(txt)
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter has been closed and cannot be shown.")
if full_screen is None:
full_screen = self._theme.full_screen
if full_screen:
self.ren_win.SetFullScreen(True)
self.ren_win.BordersOn() # super buggy when disabled
else:
if window_size is None:
window_size = self.window_size
else:
self._window_size_unset = False
self.ren_win.SetSize(window_size[0], window_size[1])
# reset camera for the first render unless camera is set
self._on_first_render_request(cpos)
# handle plotter notebook
if jupyter_backend and not self.notebook:
warnings.warn(
'Not within a jupyter notebook environment.\nIgnoring ``jupyter_backend``.'
)
if self.notebook:
from ..jupyter.notebook import handle_plotter
if jupyter_backend is None:
jupyter_backend = self._theme.jupyter_backend
if jupyter_backend != 'none':
disp = handle_plotter(
self, backend=jupyter_backend, return_viewer=return_viewer, **jupyter_kwargs
)
return disp
self.render()
# This has to be after the first render for some reason
if title is None:
title = self.title
if title:
self.ren_win.SetWindowName(title)
self.title = title
# Keep track of image for sphinx-gallery
if pyvista.BUILDING_GALLERY or screenshot:
# always save screenshots for sphinx_gallery
self.last_image = self.screenshot(screenshot, return_img=True)
self.last_image_depth = self.get_image_depth()
# See: https://github.com/pyvista/pyvista/issues/186#issuecomment-550993270
if interactive and not self.off_screen:
try: # interrupts will be caught here
log.debug('Starting iren')
self.iren.update_style()
if not interactive_update:
# Resolves #1260
if os.name == 'nt':
if _vtk.VTK9:
self.iren.process_events()
else:
global VERY_FIRST_RENDER
if not VERY_FIRST_RENDER:
self.iren.start()
VERY_FIRST_RENDER = False
self.iren.start()
self.iren.initialize()
except KeyboardInterrupt:
log.debug('KeyboardInterrupt')
self.close()
raise KeyboardInterrupt
# In the event that the user hits the exit-button on the GUI (on
# Windows OS) then it must be finalized and deleted as accessing it
# will kill the kernel.
# Here we check for that and clean it up before moving on to any of
# the closing routines that might try to still access that
# render window.
if not self.ren_win.IsCurrent():
self._clear_ren_win() # The ren_win is deleted
# proper screenshots cannot be saved if this happens
if not auto_close:
warnings.warn(
"`auto_close` ignored: by clicking the exit button, "
"you have destroyed the render window and we have to "
"close it out."
)
auto_close = True
# NOTE: after this point, nothing from the render window can be accessed
as if a user pressed the close button, then it destroys the
render view and a stream of errors will kill the Python
# kernel if code here tries to access that renderer.
# See issues #135 and #186 for insight before editing the
# remainder of this function.
# Close the render window if requested
if auto_close:
self.close()
# If user asked for screenshot, return as numpy array after camera
# position
if return_img or screenshot is True:
if return_cpos:
return self.camera_position, self.last_image
return self.last_image
if return_cpos:
return self.camera_position
def add_title(self, title, font_size=18, color=None, font=None, shadow=False):
"""Add text to the top center of the plot.
This is merely a convenience method that calls ``add_text``
with ``position='upper_edge'``.
Parameters
----------
title : str
The text to add the rendering.
font_size : float, optional
Sets the size of the title font. Defaults to 16 or the
value of the global theme if set.
color : color_like, optional,
Either a string, rgb list, or hex color string. Defaults
to white or the value of the global theme if set. For
example:
* ``color='white'``
* ``color='w'``
* ``color=[1.0, 1.0, 1.0]``
* ``color='#FFFFFF'``
font : str, optional
Font name may be ``'courier'``, ``'times'``, or ``'arial'``.
shadow : bool, optional
Adds a black shadow to the text. Defaults to ``False``.
Returns
-------
vtk.vtkTextActor
Text actor added to plot.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.background_color = 'grey'
>>> actor = pl.add_title('Plot Title', font='courier', color='k',
... font_size=40)
>>> pl.show()
"""
# add additional spacing from the top of the figure by default
title = '\n' + title
return self.add_text(
title,
position='upper_edge',
font_size=font_size,
color=color,
font=font,
shadow=shadow,
name='title',
viewport=False,
)
def add_cursor(
self,
bounds=(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0),
focal_point=(0.0, 0.0, 0.0),
color=None,
):
"""Add a cursor of a PyVista or VTK dataset to the scene.
Parameters
----------
bounds : length 6 sequence
Specify the bounds in the format of:
- ``(xmin, xmax, ymin, ymax, zmin, zmax)``
Defaults to ``(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)``.
focal_point : list or tuple, optional
The focal point of the cursor.
Defaults to ``(0.0, 0.0, 0.0)``.
color : color_like, optional
            Either a string, RGB sequence, or hex color string. For
            example:
* ``color='white'``
* ``color='w'``
* ``color=[1.0, 1.0, 1.0]``
* ``color='#FFFFFF'``
Returns
-------
vtk.vtkActor
VTK actor of the 2D cursor.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere)
>>> _ = plotter.add_cursor()
>>> plotter.show()
"""
alg = _vtk.vtkCursor3D()
alg.SetModelBounds(bounds)
alg.SetFocalPoint(focal_point)
alg.AllOn()
mapper = make_mapper(_vtk.vtkDataSetMapper)
mapper.SetInputConnection(alg.GetOutputPort())
actor, prop = self.add_actor(mapper)
prop.SetColor(Color(color).float_rgb)
return actor
# Tracks created plotters. At the end of the file as we need to
# define ``BasePlotter`` before including it in the type definition.
_ALL_PLOTTERS: Dict[str, BasePlotter] = {}
def _kill_display(disp_id): # pragma: no cover
"""Forcibly close the display on Linux.
See: https://gitlab.kitware.com/vtk/vtk/-/issues/17917#note_783584
And more details into why...
https://stackoverflow.com/questions/64811503
Notes
-----
This is to be used experimentally and is known to cause issues
on `pyvistaqt`
"""
if platform.system() != 'Linux':
raise OSError('This method only works on Linux')
if disp_id:
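        # The display id appears to be a SWIG-style pointer string such as
        # '_7f3c4c0010e0_p_void' (an assumption based on the parsing below), so
        # strip the leading underscore and parse the hex address portion.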
cdisp_id = int(disp_id[1:].split('_')[0], 16)
# this is unsafe as events might be queued, but sometimes the
# window fails to close if we don't just close it
Thread(target=X11.XCloseDisplay, args=(cdisp_id,)).start()
|
main.pyw
|
import getpass, shutil, json, os, re, sys, subprocess
from time import sleep, perf_counter
from threading import Thread
import datetime as dt
from tkinter import ttk, filedialog, messagebox
import tkinter as Tk
# classes
from classes.logger import Logger
from classes.game import Game
from classes.backup import Backup
from classes.restore import Restore
# optional imports
try:
import requests
requests_installed = 1
except ModuleNotFoundError:
requests_installed = 0
try:
import winsound
winsound_installed = 1
except ModuleNotFoundError:
winsound_installed = 0
class Main(Logger):
# sets script directory in case current working directory is different
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
# settings setup
    with open(r'config\settings.json') as json_file:
data = json.load(json_file)
backup_dest = data['setup']['backup_dest'] # backup destination setup
# redundancy settings
redundancy_limit = 4
backup_redundancy = data['optional_settings']['backup_redundancy']
if type(backup_redundancy) is not int or backup_redundancy not in range(1, redundancy_limit + 1):
backup_redundancy = 4
# optional settings
enter_to_quick_backup = data['optional_settings']['enter_to_quick_backup']
disable_resize = data['optional_settings']['disable_resize']
center_window = data['optional_settings']['center_window']
# compression
enable_compression = data['compression']['enable_compression']
compression_type = data['compression']['compression_type']
# debug
output = data['debug']['text_output']
enable_debug = data['debug']['enable_debug']
# scoring init
    with open(r'config\scoring.json') as json_file:
scoring = json.load(json_file)
# var init
title = 'Game Save Manager'
    allowed_filename_characters = r'[^a-zA-Z0-9.,\s]'
backup_restore_in_progress = 0
default_entry_value = 'Type Search Query Here'
post_save_name = 'Post-Restore Save'
applist = None
drive_letters = []
# sets up search directories
username = getpass.getuser()
initialdir = "C:/"
search_directories = []
search_directories_incomplete = 1
best_dir = ''
# game class
    game = Game(backup_dest=backup_dest, db_loc=r'config\game.db')
backup = Backup(game, compression_type)
restore = Restore(game, backup)
def backup_dest_check(self):
'''
Checks if backup destination in settings exists and asks if you want to choose one if it does not.
'''
Tk.Tk().withdraw()
if not os.path.exists(self.backup_dest):
msg = 'Do you want to choose a save backup directory instead of using a default within the program folder?'
response = messagebox.askyesno(title=self.title, message=msg)
if response:
self.backup_dest = filedialog.askdirectory(initialdir="C:/", title="Select Save Backup Directory")
if os.path.exists(self.backup_dest):
                    self.data['setup']['backup_dest'] = self.backup_dest
                    json_object = json.dumps(self.data, indent=4)  # serialize settings to json
                    with open(r'config\settings.json', "w") as outfile:  # write updated settings back to disk
outfile.write(json_object)
else:
messagebox.showwarning(title=self.title, message='Path does not exist.')
else:
os.mkdir(self.backup_dest)
def run_full_backup(self):
'''
Backups up the game entered based on SQLite save location data to the specified backup folder.
'''
def backup():
'''
Runs a single backup for the entered arg.
            Also sets self.backup_restore_in_progress to True so the program won't quit during a backup.
'''
self.backup_restore_in_progress = 1
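            # each backup is written to a folder named with the current timestamp,
            # so several backups of the same game can coexist and sort by date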
current_time = dt.datetime.now().strftime("%m-%d-%y %H-%M-%S")
dest = os.path.join(self.game.backup_loc, current_time)
if self.enable_compression:
self.backup.compress(self.game.save_location, dest)
else:
shutil.copytree(self.game.save_location, dest)
self.backup.delete_oldest(self.game.backup_loc, self.backup_redundancy, self.post_save_name)
sleep(.3)
# BUG total_size is wrong for some games right after it finishes backing up
self.game.get_backup_size()
total_backups = len(os.listdir(self.game.backup_loc))
info = f'{self.game.name} has been backed up.\n'\
f'Game Backup Size: {self.game.backup_size} from {total_backups} backups'
self.ActionInfo.config(text=info)
# BUG repeated presses replaces the wrong entry
self.game_listbox.delete(Tk.ACTIVE)
self.game_listbox.insert(0, self.game.name)
self.logger.info(f'Backed up Save for {self.game.name}.')
self.backup_restore_in_progress = 0
self.completion_sound()
if self.game.name == None:
messagebox.showwarning(title=self.title, message='No game is selected yet.')
return
self.ActionInfo.config(text=f'Backing up {self.game.name}\nDo not close program.')
try:
Thread(target=backup).start()
last_backup = dt.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
self.game.update_last_backup(self.game.name, last_backup)
except FileNotFoundError:
messagebox.showwarning(title=self.title, message='Action Failed - File location does not exist.')
            self.logger.error(f'Failed to back up save for {self.game.name}. File location does not exist.')
except FileExistsError:
messagebox.showwarning(title=self.title, message='Action Failed - Save Already Backed up.')
            self.logger.error(f'Failed to back up save for {self.game.name}. Save already backed up.')
except SystemExit:
print('Cancelled Backup.')
def tk_window_options(self, window_name, window_width, window_height, define_size=0):
'''
Disables window resize and centers window if config enables each.
'''
window_name.title(self.title)
if sys.platform == 'win32':
            window_name.iconbitmap(window_name, r'images\Save_icon.ico')
if self.disable_resize: # sets window to not resize if disable_resize is set to 1
window_name.resizable(width=False, height=False)
if self.center_window == 1:
width_pos = int((window_name.winfo_screenwidth()-window_width)/2)
height_pos = int((window_name.winfo_screenheight()-window_height)/2)
if define_size:
window_name.geometry(f'{window_width}x{window_height}+{width_pos}+{height_pos}')
else:
window_name.geometry(f'+{width_pos}+{height_pos}')
def backup_shortcut(self, event):
'''
Shortcut that activates when pressing enter while a game is selected.
'''
response = messagebox.askquestion(
title=self.title,
            message=f'Are you sure you want to back up {self.game.name}?')
if response == 'yes':
self.run_full_backup()
else:
self.game_listbox.activate(0)
return
print(event)
def restore_save(self):
'''
Opens an interface for picking the dated backup of the selected game to restore.
        First it checks that a game is selected and that a backup exists (exits the function if not).
'''
# TODO test Restore functions
# exits if no game is selected
if self.game.name == None:
messagebox.showwarning(title=self.title, message='No game is selected yet.')
return
self.backup_restore_in_progress = 1 # disables closing the interface until restore completes
# checks if the game has a backup folder
if os.path.exists(self.game.backup_loc):
# creates list of backups that can be restored
self.save_dic = {}
for file in os.scandir(self.game.backup_loc):
file_name = os.path.splitext(file.name)[0]
if file_name == self.post_save_name:
self.save_dic['Undo Last Restore'] = file
continue
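                # timestamped folder names are shown as a readable label
                # (e.g. "Jan 05, 2022 03:12 PM"); names that do not parse
                # keep their original text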
try:
updated_name = dt.datetime.strptime(file_name, '%m-%d-%y %H-%M-%S').strftime('%b %d, %Y %I:%M %p')
except ValueError:
updated_name = file_name
self.save_dic[updated_name] = file
else:
# brings up a warning if no backup exists for the selected game.
messagebox.showwarning(title=self.title, message=f'No backed up saves exist for {self.game.name}.')
self.backup_restore_in_progress = 0
return
def close_restore_win():
'''
Notifies the program that the restore process is complete and closes the restore window.
'''
self.backup_restore_in_progress = 0
self.Restore_Game_Window.destroy()
def restore_selected_save():
'''
Restores selected game save based on save clicked within the Restore_Game_Window window.
'''
selected_backup = self.save_dic[save_listbox.get(save_listbox.curselection())]
full_save_path = os.path.join(self.backup_dest, self.game.name, selected_backup.name)
# check if the last post restore save is being restored
if self.post_save_name in selected_backup.name:
msg = 'This will delete the previously restored backup.'\
                    '\nAre you sure that you want to revert to the backup?'\
                    '\nThis will not be sent to the recycle bin.'
response = messagebox.askyesno(title=self.title, message=msg)
if response:
self.restore.delete_dir_contents(self.game.save_location)
self.restore.backup_orignal_save(selected_backup, full_save_path)
self.logger.info(f'Restored {self.post_save_name} for {self.game.name}.')
else:
# check if a last restore backup exists already
for item in os.scandir(os.path.join(self.backup_dest, self.game.name)):
if self.post_save_name in item.name:
msg = f'Backup of Post-Restore Save already exists.'\
'\nDo you want to delete it in order to continue?'
response = messagebox.askyesno(title=self.title, message=msg)
if response:
# finds the post_save_name
for f in os.scandir(os.path.join(self.backup_dest, self.game.name)):
if self.post_save_name in f.name:
# deletes the compressed file or deletes the entire folder tree
if self.backup.compressed(f.name):
os.remove(f)
else:
shutil.rmtree(f)
self.logger.info(f'Deleted original save before last restore for {self.game.name}.')
else:
print('Canceling Restore.')
self.Restore_Game_Window.grab_release()
return
dest = os.path.join(self.backup_dest, self.game.name, self.post_save_name)
self.backup.compress(self.game.save_location, dest)
self.restore.delete_dir_contents(self.game.save_location) # delete existing save
self.restore.backup_orignal_save(selected_backup, full_save_path)
close_restore_win()
self.Restore_Game_Window = Tk.Toplevel(takefocus=True)
self.Restore_Game_Window.protocol("WM_DELETE_WINDOW", close_restore_win)
window_width = 300
window_height = 220
self.tk_window_options(self.Restore_Game_Window, window_width, window_height)
self.Restore_Game_Window.grab_set()
RestoreInfo = ttk.Label(self.Restore_Game_Window,
text='Select save to restore for', font=("Arial Bold", 10))
RestoreInfo.grid(columnspan=2, row=0, column=0, pady=(10,0), padx=10)
RestoreGame = ttk.Label(self.Restore_Game_Window,
text=self.game.name, font=("Arial Bold", 10))
RestoreGame.grid(columnspan=2, row=1, column=0, pady=(0,10), padx=10)
save_listbox = Tk.Listbox(self.Restore_Game_Window, exportselection=False, font=("Arial Bold", 12), height=5,
width=30)
save_listbox.grid(columnspan=2, row=2, column=0, pady=5, padx=10)
for item in self.save_dic:
save_listbox.insert(Tk.END, item)
confirm_button = ttk.Button(self.Restore_Game_Window, text='Confirm', command=restore_selected_save, width=20)
confirm_button.grid(row=3, column=0, padx=10, pady=10)
CancelButton = ttk.Button(self.Restore_Game_Window, text='Cancel', command=close_restore_win, width=20)
CancelButton.grid(row=3, column=1, padx=10, pady=10)
self.Restore_Game_Window.mainloop()
def explore_folder(self, folder):
'''
Opens the selected games save location in explorer or backup folder.
Arguments:
folder -- Set to "Game Save" or "Backup" to determine folder that is opened in explorer
'''
if self.game.name == None:
messagebox.showwarning(title=self.title, message='No game is selected.')
elif folder == 'Game Save': # open game save location in explorer
if not os.path.isdir(self.game.save_location):
msg = f'Save location for {self.game.name} no longer exists'
messagebox.showwarning(title=self.title, message=msg)
subprocess.Popen(f'explorer "{self.game.save_location}"')
elif folder == 'Backup': # open game backup location in explorer
if not os.path.isdir(self.game.backup_loc):
messagebox.showwarning(title=self.title, message=f'{self.game.name} has not been backed up yet.')
subprocess.Popen(f'explorer "{self.game.backup_loc}"')
def add_game_to_database(self):
'''
Adds game to database using entry inputs.
'''
game_name = self.GameNameEntry.get()
save_location = self.GameSaveEntry.get().replace('/', '\\')
if len(self.game.get_filename(game_name)) == 0:
            messagebox.showwarning(title=self.title, message='Game name has no legal characters for a filename.')
return
if self.game.exists_in_db(game_name):
msg = f"Can't add {self.game.name} to database.\nGame already exists."
messagebox.showwarning(title=self.title, message=msg)
else:
if os.path.isdir(save_location):
self.game.add(game_name, save_location)
# delete entry data
self.GameSaveEntry.delete(0, Tk.END)
self.GameNameEntry.delete(0, Tk.END)
# update listbox with new game
self.sorted_list.insert(0, game_name)
self.game_listbox.insert(0, game_name)
self.update_listbox()
self.logger.info(f'Added {game_name} to database.')
else:
msg = f'Save Location for {self.game.name} does not exist.'
messagebox.showwarning(title=self.title, message=msg)
def find__drive_letters(self):
'''
Finds the active drive letters for storage.
'''
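        # `fsutil fsinfo drives` prints a line like "Drives: C:\ D:\ ...";
        # the first character of each drive token is kept as the letter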
with os.popen("fsutil fsinfo drives") as data:
letter_output = data.readlines()[1]
words = re.findall('\S+', letter_output)[1:]
result = []
for letters in words:
result.append(letters[0])
if self.enable_debug:
print(result)
return result
def find_search_directories(self):
'''
Finds the directories to use when searching for games.
'''
start = perf_counter()
# os specific settings
platform = sys.platform
if platform == 'win32':
dirs_to_check = [
rf":/Users/{self.username}/AppData/Local",
rf":/Users/{self.username}/AppData/LocalLow",
rf":/Users/{self.username}/AppData/Roaming",
rf":/Users/{self.username}/Saved Games",
rf":/Users/{self.username}/Documents",
r":/Program Files (x86)/Steam/steamapps/common",
r":/Program Files/Steam/steamapps/common"
]
self.drive_letters = self.find__drive_letters()
elif platform == 'linux':
# TODO add linux support to find_search_directories
dirs_to_check = ['$HOME/.local/share/Steam/userdata']
# starts directory check
for dir in dirs_to_check:
for letter in self.drive_letters:
current_dir = letter + dir
if os.path.isdir(current_dir):
if 'documents' in current_dir.lower():
self.initialdir = current_dir
self.search_directories.append(current_dir)
for custom_saved_dir in self.data['custom_save_directories']:
self.search_directories.append(custom_saved_dir)
if self.enable_debug:
print(self.search_directories)
finish = perf_counter() # stop time for checking elapsed runtime
elapsed_time = round(finish-start, 2)
if self.enable_debug:
print(f'find_search_directories: {elapsed_time} seconds')
self.search_directories_incomplete = 0
def open_smart_browse_window(self):
'''
Smart Browse Progress window
'''
# closes window if it is already open so a new one can be created
# TODO switch to method without try block
try:
self.smart_browse_win.destroy()
except AttributeError:
pass
# opens window
self.smart_browse_win = Tk.Toplevel(self.main_gui)
self.smart_browse_win.attributes('-topmost', 'true')
self.tk_window_options(self.smart_browse_win, 340, 130, define_size=0)
text = f'Looking for the game save directory for\n{self.GameNameEntry.get()}'
self.info_label = Tk.Label(self.smart_browse_win, text=text, font=("Arial Bold", 10))
self.info_label.grid(row=0, column=0, pady=(9))
self.progress = ttk.Progressbar(self.smart_browse_win, orient=Tk.HORIZONTAL, length=360, mode='determinate')
self.progress.grid(row=1, column=0, pady=(5,10), padx=20)
self.s_browse = ttk.Button(self.smart_browse_win, text='Browse', command=lambda: self.browse(self.best_dir),
width=23)
self.s_browse.grid(row=2, column=0, pady=(5,10))
self.s_browse.config(state='disabled')
self.smart_browse_win.focus_force()
@staticmethod
def nonascii(string):
'''
Returns the given string with ASCII characters removed.
'''
return string.encode("ascii", "ignore").decode()
@staticmethod
def completion_sound():
'''
Makes a sound denoting a task completion.
'''
if sys.platform == 'win32':
if winsound_installed:
winsound.PlaySound("Exclamation", winsound.SND_ALIAS)
def dir_scoring(self, possible_dir):
'''
        Uses a scoring system to determine how likely the given directory is to be the save location.
'''
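        # scoring weights come from config\scoring.json: file and folder names are
        # matched against its positive and negative scoring tables below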
# checks if possible_dir is in the blacklist
dir_blacklist = self.scoring['dir_blacklist']
for string in dir_blacklist:
if string.lower() in possible_dir.lower():
return 0
# prints possible_dir if enable_debug is 1 and the var is not blank
if possible_dir != '' and self.enable_debug:
print(f'\n{possible_dir}')
current_score = 0
for found_root, found_dirs, found_files in os.walk(possible_dir, topdown=False):
for found_file in found_files:
# file scoring TODO add a way to track scoring that applies
# + scorers
for item, score in self.scoring['file_positive_scoring'].items():
if item in found_file.lower():
current_score += score
# - scorers
for item, score in self.scoring['file_negative_scoring'].items():
if item in found_file.lower():
current_score -= score
for found_dir in found_dirs:
# folder scoring
# + scorers
for item, score in self.scoring['folder_positive_scoring'].items():
if item in found_dir.lower():
current_score += score
# - scorers
for item, score in self.scoring['folder_negative_scoring'].items():
if item in found_dir.lower():
current_score -= score
if self.enable_debug:
print(f'Score {current_score}')
return current_score
def get_appid(self, game):
'''
Checks the Steam App list for a game and returns its app id if it exists as entered.
'''
if self.applist == None:
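            # download the full Steam app list once and cache it on the instance
            # so repeated lookups do not re-request it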
applist = 'http://api.steampowered.com/ISteamApps/GetAppList/v0002/'
data = requests.get(applist)
if data.status_code != requests.codes.ok:
return None
self.applist = data.json()['applist']['apps']
for item in self.applist:
if item["name"] == game:
return item['appid']
return None
def check_userdata(self, app_id):
'''
        Checks for a save folder within the Steam userdata folder by looking for the given game's app_id.
'''
existing_paths = []
if len(self.drive_letters) == 0:
self.drive_letters = self.find__drive_letters()
for letter in self.drive_letters:
path = f'{letter}:/Program Files (x86)/Steam/userdata'
if os.path.exists(path):
existing_paths.append(path)
for path in existing_paths:
for dirpath, dirnames, filenames in os.walk(path):
for dir in dirnames:
found_path = os.path.join(dirpath, dir)
if str(app_id) in found_path:
return found_path.replace('/', '\\')
return False
def game_save_location_search(self, full_game_name, test=0):
'''
Searches for possible save game locations for the given name using a point based system.
        The highest scoring directory is chosen.
'''
# TODO split into more functions
# var setup
overall_start = perf_counter() # start time for checking elapsed runtime
best_score = 0
dir_changed = 0
current_score = 0
possible_dir = ''
search_method = 'name search'
self.best_dir = self.initialdir
if self.enable_debug:
print(f'\nGame: {self.game.filename}')
# waits for search directories to be ready before the save search is started
while self.search_directories_incomplete:
sleep(.1)
# disables progress bar actions when testing
if test == 0:
self.progress['maximum'] = len(self.search_directories) + 1
for directory in self.search_directories:
if self.enable_debug:
print(f'\nCurrent Search Directory: {directory}')
directory_start = perf_counter()
for root, dirs, files in os.walk(directory, topdown=False):
for dir in dirs:
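                    # compare names case-insensitively with spaces stripped so
                    # e.g. "GameName" still matches a "Game Name" folder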
if self.game.get_filename(full_game_name).lower().replace(' ', '') in dir.lower().replace(' ', ''):
possible_dir = os.path.join(root, dir)
current_score = self.dir_scoring(possible_dir)
# update based on high score
directory_finish = perf_counter()
if self.enable_debug:
print(f'Dir Search Time: {round(directory_finish-directory_start, 2)} seconds')
# disables progress bar actions when testing
if test == 0:
self.progress['value'] += 1
if current_score > best_score:
best_score = current_score
self.best_dir = os.path.abspath(possible_dir)
# early break if threshold is met
if current_score > 600:
break
current_score = 0
overall_finish = perf_counter() # stop time for checking elapsed runtime
elapsed_time = round(overall_finish-overall_start, 2)
if self.enable_debug:
print(f'\n{self.game.filename}\nOverall Search Time: {elapsed_time} seconds')
print(f'Path Used: {self.best_dir}')
print(f'Path Score: {best_score}')
# checks if nothing was found from the first search
if self.best_dir == self.initialdir:
if requests_installed:
app_id = self.get_appid(full_game_name)
if app_id != None:
app_id_path = self.check_userdata(app_id)
if app_id_path is not False:
self.best_dir = app_id_path
search_method = 'app id search'
else:
self.logger.info(f'No Game save can be found for {full_game_name}')
else:
                    self.logger.info(f"app_id can't be found for {full_game_name}")
if test == 0:
game_save = os.path.abspath(self.GameSaveEntry.get())
if game_save != self.script_dir:
if self.best_dir in game_save:
print('Found save is correct.')
else:
print('Found save is incorrect.')
dir_changed = 1
else:
return self.best_dir
self.progress['value'] = self.progress['maximum']
# completion time output
limit = 50
if len(self.best_dir) > limit:
info = f'Path Found in {elapsed_time} seconds\n...{self.best_dir[-limit:]}'
else:
info = f'Path Found in {elapsed_time} seconds\n{self.best_dir[-limit:]}'
self.logger.info(f'Save for "{full_game_name}" found in {elapsed_time} seconds via {search_method}.')
self.info_label.config(text=info)
self.completion_sound()
# enables the browse button when a save folder seems to be found
if self.best_dir != self.initialdir:
if dir_changed:
# adds info that the found save location is not the same as the save location in the entry box
                info += '\nFound directory is different than the entered directory.'
self.s_browse.config(state='normal')
def smart_browse(self):
'''
Searches for a starting point for the save location browser.
'''
# checks if no game name is in entry box.
game_name = self.GameNameEntry.get()
        if not game_name:
messagebox.showwarning(
title=self.title,
message='Smart Browse requires a game name to be entered.')
return
self.open_smart_browse_window()
# looks for folders with the games name
Thread(target=self.game_save_location_search, args=(game_name,), daemon=True).start()
def browse(self, initial_dir=None):
'''
Opens a file dialog so a save directory can be chosen.
        It starts in the current save location entry if it exists, otherwise in the Documents folder found during the drive letter search (or C:/).
'''
if initial_dir == None:
starting_point = self.initialdir
current_save_location = self.GameSaveEntry.get()
if os.path.exists(current_save_location):
starting_point = current_save_location
else:
starting_point = initial_dir
self.smart_browse_win.destroy()
save_dir = filedialog.askdirectory(initialdir=starting_point, title="Select Save Directory")
self.GameSaveEntry.delete(0, Tk.END)
self.GameSaveEntry.insert(0, save_dir)
def delete_game(self):
'''
Deletes selected game from SQLite Database.
'''
if self.game.name == None:
messagebox.showwarning(title=self.title, message='No game is selected.')
return
delete_check = messagebox.askyesno(
title=self.title,
message=f'Are you sure that you want to delete {self.game.name}?')
if delete_check:
self.game.delete_from_db()
# deletes game from game_listbox and sorted_list
index = self.game_listbox.get(0, Tk.END).index(self.game.name)
self.game_listbox.delete(index)
self.sorted_list.pop(index)
self.update_listbox()
# checks if you want to delete the games save backups as well
if os.path.isdir(self.game.backup_loc):
response = messagebox.askyesno(
title=self.title,
message='Do you want to delete the backed up saves as well?')
if response:
try:
shutil.rmtree(self.game.backup_loc)
                        self.logger.info(f'Deleted backups for {self.game.name}.')
except PermissionError:
self.logger.warning(f'Failed to delete backups for {self.game.name}')
messagebox.showerror(title=self.title, message='Failed to delete directory\nPermission Error')
self.logger.info(f'Deleted {self.game.name} from database.')
def update_game(self):
'''
Allows updating data for games in database.
The last selected game in the Listbox gets updated with the info from the Add/Update Game entries.
'''
if self.game.name == None:
messagebox.showwarning(title=self.title, message='No game is selected yet.')
return
# gets entered game info
game_name = self.GameNameEntry.get()
save_location = self.GameSaveEntry.get().replace('/', '\\')
if os.path.isdir(save_location):
old_save = self.game.save_location
old_name = self.game.name
old_backup = self.game.backup_loc
self.game.update(self.game.name, game_name, save_location)
# error when path is changed
print(old_backup)
print(self.game.backup_loc)
os.rename(old_backup, self.game.backup_loc)
# updates listbox entry for game
if len(self.game_listbox.curselection()) != 0:
index = self.game_listbox.curselection()
else:
index = 0
self.game_listbox.delete(Tk.ACTIVE)
self.game_listbox.insert(index, game_name)
self.logger.info(f'Updated {self.game.name} in database.')
else:
messagebox.showwarning(title=self.title, message='Save Location does not exist.')
@staticmethod
def readable_time_since(datetime_obj):
'''
Gives time since for a datetime object in the unit of time that makes the most sense
rounded to 1 decimal place.
Arguments:
datetime_obj -- datetime object that will have the current date subtracted from it
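        Example: a datetime from 3 hours ago returns ' 3.0 hours ago'.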
'''
seconds = (dt.datetime.now() - datetime_obj).total_seconds()
if seconds < (60 * 60): # seconds in minute * minutes in hour
minutes = round(seconds / 60, 1) # seconds in a minute
return f' {minutes} minutes ago'
elif seconds < (60 * 60 * 24): # seconds in minute * minutes in hour * hours in a day
hours = round(seconds / (60 * 60), 1) # seconds in minute * minutes in hour
return f' {hours} hours ago'
else:
days = round(seconds / 86400, 1) # seconds in minute * minutes in hour * hours in a day
return f' {days} days ago'
def toggle_buttons(self, action=''):
'''
Disables all buttons within the buttons list.
'''
if action == 'disable':
buttons = [self.ExploreBackupButton, self.ExploreSaveButton, self.BackupButton, self.RestoreButton]
for button in buttons:
button.config(state='disabled')
else:
# enables buttons that should be enabled if a game is selected
for button in [self.BackupButton, self.ExploreSaveButton]:
button.config(state='normal')
            # enables buttons that should be enabled if the selected game has a backup folder, otherwise disables them
if os.path.isdir(self.game.backup_loc):
set_state = 'normal'
else:
set_state = 'disabled'
for button in [self.ExploreBackupButton, self.RestoreButton]:
button.config(state=set_state)
def update_listbox(self, data=None):
'''
Deletes current listbox items and adds the given data in.
'''
if data == None:
# refreshes the value of sorted_list
data = self.sorted_list
self.game_listbox.delete(0, Tk.END)
for item in data:
self.game_listbox.insert(Tk.END, item)
self.ActionInfo.config(text='Select a Game\nto continue')
# updates title info label
info_text = f'Total Games: {len(self.sorted_list)}\n'\
f'Total Backup Size: {self.game.convert_size(self.backup_dest)}'
self.Title.config(text=info_text)
self.toggle_buttons('disable')
def entry_search(self, e):
'''
Finds all items in the sorted_list that have the search box data in it.
It then updates the listbox data to only include matching results.
'''
# TODO Test to be sure threading here does not cause issues.
def search():
typed = self.search_entry.get()
if typed == '':
data = self.sorted_list
else:
data = []
for item in self.sorted_list:
if typed.lower() in item.lower():
data.append(item)
self.update_listbox(data)
Thread(target=search, daemon=True).start()
def select_entry(self, e):
'''
Deletes only search box default text on click.
'''
if self.search_entry.get() == self.default_entry_value:
self.search_entry.delete(0, Tk.END)
def listbox_nav(self, e):
'''
Allows Up and Down arrow keys to navigate the listbox.
'''
index = self.game_listbox.curselection()[0]
if e.keysym == 'Up':
index += -1
if e.keysym == 'Down':
index += 1
if 0 <= index < self.game_listbox.size():
self.game_listbox.selection_clear(0, Tk.END)
self.game_listbox.select_set(index)
self.game_listbox.selection_anchor(index)
self.game_listbox.activate(index)
def unfocus_entry(self, e):
'''
Resets search box to default_entry_value when it loses focus.
'''
self.search_entry.delete(0, Tk.END)
self.search_entry.insert(0, self.default_entry_value)
def select_listbox_entry(self, Update = 0):
'''
Updates Game Data into Name and Save Entry for viewing.
Allows for updating specific entries in the database as well.
Arguments:
Update -- 1 or 0 (default = 0)
'''
self.game.set(self.game_listbox.get(self.game_listbox.curselection()))
# ignores function if listbox is empty
if self.game_listbox.size() == 0:
return
# clears entry boxes
self.GameNameEntry.delete(0, Tk.END)
self.GameSaveEntry.delete(0, Tk.END)
if self.backup_restore_in_progress:
return
# updates entry boxes to show currently selected game in listbox
if Update == 1:
# game name and entry box update
self.GameNameEntry.insert(0, self.game.name)
self.GameSaveEntry.insert(0, self.game.save_location)
# search box update
self.search_entry.delete(0, Tk.END)
self.search_entry.insert(0, self.default_entry_value)
# enables all buttons to be pressed once a selection is made
self.toggle_buttons()
total_size = self.game.convert_size(self.game.backup_loc)
if self.game.last_backup == 'Never':
info = f'{self.game.name} has not been backed up\n'
else:
time_since = self.readable_time_since(dt.datetime.strptime(self.game.last_backup, '%Y/%m/%d %H:%M:%S'))
info = f'{self.game.name} was last backed up {time_since}\n'\
f'Game Backup Size: {total_size} from {len(os.listdir(self.game.backup_loc))} backups'
self.ActionInfo.config(text=info)
self.BackupButton.focus_set()
def exit_program(self):
'''
Closes the database and quits the program when closing the interface.
'''
if self.backup_restore_in_progress:
msg = f'Backup/Restore in progress.\n{self.title} will close after completion when you close this message.'
messagebox.showerror(title=self.title, message=msg)
while self.backup_restore_in_progress:
sleep(.1)
# BUG fails to exit if filedialog is left open
# fix using subclassed filedialog commands that can close it
exit()
def open_interface_window(self):
'''
Opens the main Game Save Manager interface.
'''
start = perf_counter()
# Defaults
BoldBaseFont = "Arial Bold"
self.main_gui = Tk.Tk()
self.main_gui.protocol("WM_DELETE_WINDOW", self.exit_program)
window_width = 680
window_height = 550
self.tk_window_options(self.main_gui, window_width, window_height)
# self.main_gui.geometry(f'{window_width}x{window_height}+{width}+{height}')
# binding
if self.enter_to_quick_backup:
self.main_gui.bind('<Return>', self.backup_shortcut)
# Main Row 0
Backup_Frame = Tk.Frame(self.main_gui)
Backup_Frame.grid(columnspan=4, column=0, row=0, padx=(20, 20), pady=(5, 0))
self.Title = Tk.Label(Backup_Frame, text='\n', font=(BoldBaseFont, 10))
self.Title.grid(columnspan=4, row=0, column=1)
button_width = 23
self.BackupButton = ttk.Button(Backup_Frame, text='Backup Save', state='disabled',
command=self.run_full_backup, width=button_width)
self.BackupButton.grid(row=3, column=1, padx=5, pady=5)
self.RestoreButton = ttk.Button(Backup_Frame, text='Restore Save', state='disabled',
command=self.restore_save, width=button_width)
self.RestoreButton.grid(row=3, column=2, padx=5)
self.ExploreSaveButton = ttk.Button(Backup_Frame, text='Explore Save Location', state='disabled',
command=lambda: self.explore_folder('Game Save'), width=button_width)
self.ExploreSaveButton.grid(row=4, column=1, padx=5)
self.ExploreBackupButton = ttk.Button(Backup_Frame, text='Explore Backup Location', state='disabled',
command=lambda: self.explore_folder('Backup'), width=button_width)
self.ExploreBackupButton.grid(row=4, column=2, padx=5)
# Main Row 1
instruction = 'Select a Game\nto continue'
self.ActionInfo = Tk.Label(self.main_gui, text=instruction, font=(BoldBaseFont, 10))
self.ActionInfo.grid(columnspan=4, row=1, column=0, padx=5, pady= 5)
# Main Row 2
self.ListboxFrame = Tk.Frame(self.main_gui)
self.ListboxFrame.grid(columnspan=4, row=2, column=0, padx=(20, 20), pady=(5, 10))
self.scrollbar = Tk.Scrollbar(self.ListboxFrame, orient=Tk.VERTICAL)
self.scrollbar.grid(row=1, column=3, sticky='ns', rowspan=3)
self.search_entry = Tk.ttk.Entry(self.ListboxFrame, width=89, exportselection=0)
self.search_entry.grid(columnspan=3, row=0, column=0, pady=(0, 3))
self.search_entry.insert(0, self.default_entry_value)
self.search_entry.bind('<1>', self.select_entry)
self.search_entry.bind('<FocusOut>', self.unfocus_entry)
self.search_entry.bind('<KeyRelease>', self.entry_search)
self.game_listbox = Tk.Listbox(self.ListboxFrame, exportselection=False, yscrollcommand=self.scrollbar.set,
font=(BoldBaseFont, 12), height=10, width=60)
self.game_listbox.grid(columnspan=3, row=1, column=0)
self.game_listbox.bind('<<ListboxSelect>>', lambda event,
game_listbox=self.game_listbox,:self.select_listbox_entry(1))
# TODO finish or delete up and down control of listbox
        # full interface bind for listbox navigation
# self.main_gui.bind('<Up>', lambda event,arg=.1:self.listbox_nav(event))
# self.main_gui.bind('<Down>', lambda event,arg=.1:self.listbox_nav(event))
# scrollbar config
self.scrollbar.config(command=self.game_listbox.yview)
# listbox fill
self.sorted_list = self.game.sorted_games()
self.update_listbox()
# Main Row 3
Add_Game_Frame = Tk.LabelFrame(self.main_gui, text='Manage Games')
Add_Game_Frame.grid(columnspan=4, row=3, padx=15, pady=(5, 17))
EnterGameLabel = Tk.ttk.Label(Add_Game_Frame, text='Enter Game Name')
EnterGameLabel.grid(row=0, column=0)
entry_width = 65
self.GameNameEntry = Tk.ttk.Entry(Add_Game_Frame, width=entry_width, exportselection=0)
self.GameNameEntry.grid(row=0, column=1, columnspan=3, pady=8, padx=5)
EnterSaveLabeL = Tk.ttk.Label(Add_Game_Frame, text='Enter Save Location')
EnterSaveLabeL.grid(row=1, column=0)
self.GameSaveEntry = Tk.ttk.Entry(Add_Game_Frame, width=entry_width, exportselection=0)
self.GameSaveEntry.grid(row=1, column=1, columnspan=3, pady=5, padx=10)
browse_button_width = 13
SmartBrowseButton = Tk.ttk.Button(Add_Game_Frame, text='Smart Browse', width=browse_button_width,
command=self.smart_browse)
SmartBrowseButton.grid(row=0, column=4, padx=10)
BrowseButton = Tk.ttk.Button(Add_Game_Frame, text='Browse', width=browse_button_width,
command=self.browse)
BrowseButton.grid(row=1, column=4, padx=10)
# Button Frame Row 2
Button_Frame = Tk.Frame(Add_Game_Frame)
Button_Frame.grid(columnspan=5, row=2, pady=(5, 5))
button_padx = 4
button_pady = 5
ConfirmAddButton = Tk.ttk.Button(Button_Frame, text='Add Game',
command=self.add_game_to_database, width=16)
ConfirmAddButton.grid(row=2, column=0, padx=button_padx, pady=button_pady)
UpdateButton = Tk.ttk.Button(Button_Frame, text='Update Game',
command=self.update_game, width=16)
UpdateButton.grid(row=2, column=1, padx=button_padx, pady=button_pady)
RemoveButton = ttk.Button(Button_Frame, text='Remove Game',
command=self.delete_game, width=16)
RemoveButton.grid(row=2, column=2, padx=button_padx, pady=button_pady)
ClearButton = Tk.ttk.Button(Button_Frame, text='Clear Entries',
command=self.select_listbox_entry, width=16)
ClearButton.grid(row=2, column=3, padx=button_padx, pady=button_pady)
ClearButton = Tk.ttk.Button(Button_Frame, text='Refresh Games', command=self.update_listbox, width=16)
ClearButton.grid(row=2, column=4, padx=button_padx, pady=button_pady)
self.game.database_check()
# interface startup time check
end = perf_counter()
start_elapsed = round(end-start, 2)
if start_elapsed > 1:
print('Interface Ready: ', start_elapsed)
self.main_gui.mainloop()
def run(self):
'''
Runs everything needed to make the program work.
'''
start = perf_counter()
if self.output:
sys.stdout = open("output.txt", "w")
self.backup_dest_check()
Thread(target=self.find_search_directories).start()
# main startup check
end = perf_counter()
start_elapsed = round(end-start, 2)
if start_elapsed > 1:
print('Pre Interface check ready: ', start_elapsed)
# opens the interface
self.open_interface_window()
if self.output:
sys.stdout.close()
if __name__ == '__main__':
Main().run()
|
test_messaging.py
|
import multiprocessing
import pytest
import time
from datetime import datetime
from pocs.utils.messaging import PanMessaging
@pytest.fixture(scope='module')
def mp_manager():
return multiprocessing.Manager()
@pytest.fixture(scope='function')
def forwarder(mp_manager):
ready = mp_manager.Event()
done = mp_manager.Event()
def start_forwarder():
PanMessaging.create_forwarder(
12345, 54321, ready_fn=lambda: ready.set(), done_fn=lambda: done.set())
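    # The forwarder bridges the two ports: publishers connect on 12345 and
    # subscribers on 54321 (matching the pub_and_sub fixture below).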
messaging = multiprocessing.Process(target=start_forwarder)
messaging.start()
if not ready.wait(timeout=10.0):
raise Exception('Forwarder failed to become ready!')
# Wait a moment for the forwarder to start using those sockets.
time.sleep(0.05)
yield messaging
# Stop the forwarder. Since we use the same ports in multiple
    # tests, we wait for the process to shut down.
messaging.terminate()
for _ in range(100):
# We can't be sure that the sub-process will succeed in
# calling the done_fn, so we also check for the process
# ending.
if done.wait(timeout=0.01):
break
if not messaging.is_alive():
break
def test_forwarder(forwarder):
assert forwarder.is_alive() is True
@pytest.fixture(scope='function')
def pub_and_sub(forwarder):
# Ensure that the subscriber is created first.
sub = PanMessaging.create_subscriber(54321)
time.sleep(0.05)
pub = PanMessaging.create_publisher(12345, bind=False, connect=True)
time.sleep(0.05)
yield (pub, sub)
pub.close()
sub.close()
def test_send_string(pub_and_sub):
pub, sub = pub_and_sub
pub.send_message('Test-Topic', 'Hello')
topic, msg_obj = sub.receive_message()
assert topic == 'Test-Topic'
assert isinstance(msg_obj, dict)
assert 'message' in msg_obj
assert msg_obj['message'] == 'Hello'
def test_send_datetime(pub_and_sub):
pub, sub = pub_and_sub
pub.send_message('Test-Topic', {'date': datetime(2017, 1, 1)})
topic, msg_obj = sub.receive_message()
assert msg_obj['date'] == '2017-01-01T00:00:00'
def test_storage_id(pub_and_sub, config, db):
id0 = db.insert_current('config', {'foo': 'bar'}, store_permanently=False)
pub, sub = pub_and_sub
pub.send_message('Test-Topic', db.get_current('config'))
topic, msg_obj = sub.receive_message()
assert '_id' in msg_obj
assert isinstance(msg_obj['_id'], str)
assert id0 == msg_obj['_id']
################################################################################
# Tests of the conftest.py messaging fixtures.
def test_message_forwarder_exists(message_forwarder):
assert isinstance(message_forwarder, dict)
assert 'msg_ports' in message_forwarder
assert isinstance(message_forwarder['msg_ports'], tuple)
assert len(message_forwarder['msg_ports']) == 2
assert isinstance(message_forwarder['msg_ports'][0], int)
assert isinstance(message_forwarder['msg_ports'][1], int)
assert isinstance(message_forwarder['cmd_ports'], tuple)
assert len(message_forwarder['cmd_ports']) == 2
assert isinstance(message_forwarder['cmd_ports'][0], int)
assert isinstance(message_forwarder['cmd_ports'][1], int)
# The ports should be unique.
msg_ports = message_forwarder['msg_ports']
cmd_ports = message_forwarder['cmd_ports']
ports = set(list(msg_ports) + list(cmd_ports))
assert len(ports) == 4
def assess_pub_sub(pub, sub):
"""Helper method for testing a pub-sub pair."""
# Can not send a message using a subscriber
with pytest.raises(Exception):
sub.send_message('topic_name', 'a string')
# Can not receive a message using a publisher
assert (None, None) == pub.receive_message(blocking=True)
# At first, there is nothing available to receive.
assert (None, None) == sub.receive_message(blocking=True, timeout_ms=500)
pub.send_message('topic.name', 'a string')
topic, msg_obj = sub.receive_message()
assert isinstance(msg_obj, dict)
assert 'message' in msg_obj
assert msg_obj['message'] == 'a string'
assert 'timestamp' in msg_obj
def test_msg_pub_sub(msg_publisher, msg_subscriber):
assess_pub_sub(msg_publisher, msg_subscriber)
def test_cmd_pub_sub(cmd_publisher, cmd_subscriber):
assess_pub_sub(cmd_publisher, cmd_subscriber)
|
roast.py
|
"""
The implementation module for roast.vim plugin. This module does most of the heavy lifting for the functionality
provided by the plugin.
Example: Put the following in a `api.roast` file and hit `<Leader><CR>` on it.
GET http://httpbin.org/get name=value
Inspiration / Ideas:
https://github.com/Huachao/vscode-restclient
https://github.com/baverman/vial-http
"""
from collections import defaultdict
import warnings
from threading import Thread
import requests
import vim
import roast_api
sessions = defaultdict(requests.Session)
verify_ssl = True
renderers = [
'pretty',
'headers',
]
IS_NEOVIM = vim.eval("has('nvim')") == '1'
CURRENT_RESPONSE = None
def run(*, use=None):
request = roast_api.build_request(vim.current.buffer, vim.current.range.end, use_overrides=use)
if IS_NEOVIM:
run_th(request, vim.current.buffer.number, vim.current.range.end)
else:
Thread(target=run_th, args=(request, vim.current.buffer.number, vim.current.range.end), daemon=True).start()
def run_th(request, buf_number, line_number):
global CURRENT_RESPONSE
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', requests.urllib3.exceptions.InsecureRequestWarning)
response = request.send(sessions[buf_number])
except OSError as e:
show_error(str(e))
else:
CURRENT_RESPONSE = response
vim.eval("timer_start(10, {_ -> py3eval('roast.show_response_current()')})")
vim.eval("timer_start(10, {_ -> py3eval('roast.highlight_line(\"" +
('RoastCurrentSuccess' if response.ok else 'RoastCurrentFailure') +
'", ' + str(buf_number) + ', ' + str(line_number) + ")')})")
def show_response_current():
show_response(CURRENT_RESPONSE)
def show_response(response: requests.Response):
# A window holding a roast buffer, to be used as a workspace for setting up all roast buffers.
workspace_window = workspace_renderer = None
for window in vim.windows:
if '_roast_renderer' in window.buffer.vars:
workspace_window = window
workspace_renderer = window.buffer.vars['_roast_renderer']
if not isinstance(workspace_renderer, str):
workspace_renderer = workspace_renderer.decode()
break
# Switch to workspace window.
prev_window = vim.current.window
for renderer in renderers:
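        # Each renderer gets its own scratch buffer named __roast_<renderer>__;
        # reuse it if it already exists, otherwise create it in (or next to) the
        # workspace window.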
buf_name = f'__roast_{renderer}__'
num = bufnr(buf_name)
if num < 0:
if workspace_window is not None:
vim.current.window = workspace_window
vim.command(f'keepalt edit {buf_name} | setl bt=nofile bh=hide noswf nornu')
num = bufnr(buf_name)
else:
vim.command(f'keepalt vnew {buf_name} | setl bt=nofile bh=hide noswf nornu')
num = bufnr(buf_name)
vim.current.window = workspace_window = vim.windows[int(vim.eval(f'bufwinnr({num})')) - 1]
else:
if workspace_window is not None:
vim.current.window = workspace_window
vim.command(f'keepalt {num}buffer')
else:
vim.command(f'keepalt vertical {num}sbuffer')
vim.current.window = workspace_window = vim.windows[int(vim.eval(f'bufwinnr({num})')) - 1]
buf = vim.buffers[num]
buf[:] = None
buf.vars['_roast_renderer'] = renderer
actions = getattr(roast_api, f'render_{renderer}')(buf, response)
apply_actions(buf, actions)
vim.command(f'{workspace_window.number}windo keepalt buffer __roast_{workspace_renderer or renderers[0]}__')
workspace_window.options['statusline'] = "Roast <%{get(b:, '_roast_renderer', 'N/A')}> " + \
('' if response.ok else '%#Error#') + " HTTP:" + str(response.status_code) + " %* %{&ft}"
vim.current.window = prev_window
def show_error(message: str):
vim.vars['__roast_error_message'] = message
vim.eval("timer_start(10, {_ -> execute(['echohl Error', 'redraw', 'echomsg g:__roast_error_message',"
" 'echohl None', 'unlet g:__roast_error_message'], '')})")
def highlight_line(group, buf_number, line_number):
match_id = int(vim.buffers[buf_number].vars.get('_roast_match_id', 0))
win = None
for win in vim.windows:
if win.buffer.number == buf_number:
break
if match_id:
try:
vim.eval(f'matchdelete({match_id})' if win is None else f'matchdelete({match_id}, {win.number})')
except vim.error:
# TODO: Only hide E803 error, which is thrown if this match_id has already been deleted.
pass
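    # Vim's \%Nl pattern atom matches a specific (1-based) line, hence the +1 on
    # the 0-based line_number; the match id is stored per buffer so the previous
    # highlight can be cleared on the next call.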
vim.buffers[buf_number].vars['_roast_match_id'] = \
vim.eval(f"matchadd('{group}', '\\%{line_number + 1}l', 10, -1, {{'window': {win.number}}})")
def apply_actions(buf, actions):
if 'lines' in actions:
buf[:] = actions['lines']
if 'commands' in actions:
for cmd in actions['commands']:
vim.command(cmd)
def next_render(delta=1):
renderer = vim.current.buffer.vars['_roast_renderer']
if not isinstance(renderer, str):
renderer = renderer.decode()
vim.command('buffer __roast_' + renderers[(renderers.index(renderer) + delta) % len(renderers)] + '__')
def prev_render():
next_render(-1)
def bufnr(name) -> int:
return int(vim.eval(f'bufnr("{name}")'))
|
AutoBookTKB-GUI.py
|
#!/usr/bin/python
# -*-coding:utf-8 -*-
import tkinter as tk
from tkinter import ttk
import json
import sys
import threading
class AutoBookTKB_GUI:
def __init__(self, master):
self.load_json('AutoBookTKB-settings.json')
self.master = master
self.master.title("AutoBookTKB")
self.id_box = ttk.Entry(self.master) # Id: Entry
self.id_box.insert(tk.END, self.settings['id']) # Set default
self.pwd_box = ttk.Entry(self.master,show='*') # Password: Entry
self.pwd_box.insert(tk.END, self.settings['password']) # Set default
# Course: Combobox.
self.course_chosen = tk.StringVar()
self.course = ttk.Combobox(self.master, width=18,
textvariable=self.course_chosen)
self.course['values'] = (u"請選擇課程:", 1, 2, 3, 4, 5, 6, 7, 8, 9)
# Set default
if self.settings['classIndex'].isdigit():
self.course.current(self.settings['classIndex'])
else:
self.course.current(0)
# Date: Label
self.date = ttk.Label(master, text=self.get_date_text())
# Location: Combobox
self.location_chosen = tk.StringVar()
self.location = ttk.Combobox(self.master, width=18,
textvariable=self.location_chosen)
with open('locationList.json', 'r', encoding="utf-8") as fp:
self.location_list = json.load(fp)
fp.close()
self.mylist = []
self.mylist.append(u'請選擇預約地點')
for key in self.location_list:
self.mylist.append(key)
self.location['values'] = tuple(self.mylist)
# Set default
if self.settings['location']:
self.location.current(self.mylist.index(self.settings['location']))
else:
self.location.current(0)
# Sessions: Checkbuttons
self.session_checked = [0, 0, 0, 0, 0, 0]
self.session_checkbuttons = []
for s in range(len(self.session_checked)):
self.session_checked[s] = tk.IntVar()
chkbut = tk.Checkbutton(self.master, text=str(s),
variable=self.session_checked[s],
onvalue=1, offvalue=0)
self.session_checkbuttons.append(chkbut)
# Set default
if s in self.settings['sessions']:
chkbut.select()
# Send: Button
self.send_button = ttk.Button(self.master, text=u"送出", command=self.send)
self.console = ttk.Entry(self.master)
self.master.geometry('450x600')
self.show_gui()
def send(self):
self.print_log()
self.update_settings()
self.update_json('AutoBookTKB-settings.json')
t = threading.Thread(target=self.auto_book)
t.start()
def print_log(self):
# sys.stdout = __redirection__(self.console)
print("*************************")
print("id: " + self.id_box.get())
print("pwd: " + self.pwd_box.get())
print("course: " + self.course_chosen.get())
print("location: " + self.location_chosen.get())
self.sessions = []
for idx, val in enumerate(self.session_checked):
if val.get():
self.sessions.append(idx)
print("sessions: " + str(self.sessions))
print("*************************")
def update_settings(self):
self.settings['id'] = self.id_box.get()
self.settings['password'] = self.pwd_box.get()
self.settings['classIndex'] = self.course_chosen.get()
self.settings['location'] = self.location_chosen.get()
self.settings['sessions'] = self.sessions
def update_json(self, f):
with open(f, 'w+', encoding="utf-8") as fp:
json.dump(self.settings, fp, indent=4, ensure_ascii=False)
fp.close()
def load_json(self, f):
with open(f, 'r', encoding="utf-8") as fp:
self.settings = json.load(fp)
fp.close()
def get_date_text(self):
import datetime
date = datetime.date.today() + datetime.timedelta(days=6)
return(str(date))
def show_gui(self):
ttk.Label(self.master, text=u"自動預約TKB上課座位")
ttk.Label(self.master, text="==================")
ttk.Label(self.master, text=u'身分證字號: ').place(x=50, y=80)
ttk.Label(self.master, text=u'密碼: ').place(x=50, y=120)
self.id_box.place(x=160, y=80)
self.pwd_box.place(x=160, y=120)
ttk.Label(self.master, text=u'課程: ').place(x=50, y=160)
self.course.place(x=160, y=160)
ttk.Label(self.master, text=u'日期: ').place(x=50, y=200)
self.date.place(x=160, y=200)
ttk.Label(self.master, text=u'地點: ').place(x=50, y=240)
self.location.place(x=160, y=240)
ttk.Label(self.master, text=u'場次: ').place(x=50, y=280)
y = 280
for s in range(len(self.session_checked)):
self.session_checkbuttons[s].place(x=160, y=y)
y = y + 20
ttk.Label(self.master,
text=u"按下送出後,將會於中午12:00或午夜12:00自動進行預約。").place(x=50, y=420)
self.send_button.place(x=160, y=440)
def auto_book(self):
from AutoBookTKB import AutoBookTKB
atb = AutoBookTKB('AutoBookTKB-settings.json')
atb.main()
class __redirection__():
def __init__(self, s):
self.buf=''
self.__console__=sys.stdout
self.console = s
def write(self, s):
s = s + '\n'
self.console.insert(tk.END, s)
def to_console(self):
sys.stdout = self.__console__
print(self.buf)
def flush(self):
        self.buf=''
if __name__ == '__main__':
root = tk.Tk()
atb_gui = AutoBookTKB_GUI(root)
root.mainloop()
|
AltAnalyze.py
|
#!/usr/local/bin/python2.6
###AltAnalyze
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
#import pkg_resources
#import distutils
import statistics
import sys, string
import os.path
import unique
import update
import UI
import copy
import export; reload(export)
import ExpressionBuilder; reload(ExpressionBuilder)
import ExonAnalyze_module; reload(ExonAnalyze_module)
import ExonAnnotate_module; reload(ExonAnnotate_module)
import ResultsExport_module
import FeatureAlignment
import GO_Elite
import time
import webbrowser
import random
import traceback
import shutil
try:
import multiprocessing as mlp
except Exception:
mlp=None
    print 'Note: Multiprocessing not supported for this version of python.'
try:
from scipy import stats
except Exception:
pass ### scipy is not required but is used as a faster implementation of Fisher Exact Test when present
try:
from PIL import Image as PIL_Image
try: import ImageTk
except Exception: from PIL import ImageTk
except Exception:
None #print 'Python Imaging Library not installed... using default PNG viewer'
use_Tkinter = 'no'
debug_mode = 'no'
analysis_start_time = time.time()
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
dir_list2 = [] #add in code to prevent folder names from being included
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv" or entry[-4:] == ".TXT":
dir_list2.append(entry)
return dir_list2
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
def makeUnique(item):
db1={}; list1=[]; k=0
for i in item:
try: db1[i]=[]
except TypeError: db1[tuple(i)]=[]; k=1
for i in db1:
if k==0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def returnLargeGlobalVars():
### Prints all large global variables retained in memory (taking up space)
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(globals()[var])>500:
print var, len(globals()[var])
except Exception: null=[]
def clearObjectsFromMemory(db_to_clear):
db_keys={}
try:
for key in db_to_clear: db_keys[key]=[]
except Exception:
for key in db_to_clear: del key ### if key is a list
for key in db_keys:
try: del db_to_clear[key]
except Exception:
try:
for i in key: del i ### For lists of tuples
except Exception: del key ### For plain lists
def importGeneric(filename):
fn=filepath(filename); key_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
key_db[t[0]] = t[1:]
return key_db
def importGenericFiltered(filename,filter_db):
fn=filepath(filename); key_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
key = t[0]
if key in filter_db: key_db[key] = t[1:]
return key_db
def importGenericFilteredDBList(filename,filter_db):
fn=filepath(filename); key_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
try:
null=filter_db[t[0]]
try: key_db[t[0]].append(t[1])
except KeyError: key_db[t[0]] = [t[1]]
except Exception: null=[]
return key_db
def importGenericDBList(filename):
fn=filepath(filename); key_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
try: key_db[t[0]].append(t[1])
except KeyError: key_db[t[0]] = [t[1]]
return key_db
def importExternalDBList(filename):
fn=filepath(filename); key_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
try: key_db[t[0]].append(t[1:])
except Exception: key_db[t[0]] = [t[1:]]
return key_db
def FindDir(dir,term):
dir_list = unique.read_directory(dir)
dir_list2=[]
dir_list.sort()
for i in dir_list:
if term == i: dir_list2.append(i)
if len(dir_list2)==0:
for i in dir_list:
if term in i: dir_list2.append(i)
dir_list2.sort(); dir_list2.reverse()
if len(dir_list2)>0: return dir_list2[0]
else: return ''
def openFile(file_dir):
if os.name == 'nt':
try: os.startfile('"'+file_dir+'"')
except Exception: os.system('open "'+file_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+file_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+file_dir+'"')
def openCytoscape(parent_dir,application_dir,application_name):
cytoscape_dir = FindDir(parent_dir,application_dir); cytoscape_dir = filepath(parent_dir+'/'+cytoscape_dir)
app_dir = FindDir(cytoscape_dir,application_name)
app_dir = cytoscape_dir+'/'+app_dir
if 'linux' in sys.platform:
app_dir = app_dir
app_dir2 = cytoscape_dir+'/Cytoscape'
try: createCytoscapeDesktop(cytoscape_dir)
except Exception: null=[]
dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
        if 'java' not in dir_list: print 'Java not referenced in "/usr/bin/". If not installed,\nplease install and re-try opening Cytoscape'
try:
jar_path = cytoscape_dir+'/cytoscape.jar'
main_path = cytoscape_dir+'/cytoscape.CyMain'
plugins_path = cytoscape_dir+'/plugins'
os.system('java -Dswing.aatext=true -Xss5M -Xmx512M -jar '+jar_path+' '+main_path+' -p '+plugins_path+' &')
print 'Cytoscape jar opened:',jar_path
except Exception:
print 'OS command to open Java failed.'
try:
try: openFile(app_dir2); print 'Cytoscape opened:',app_dir2
except Exception:
os.chmod(app_dir,0777)
openFile(app_dir2)
except Exception:
try: openFile(app_dir)
except Exception:
os.chmod(app_dir,0777)
openFile(app_dir)
else:
try: openFile(app_dir)
except Exception:
os.chmod(app_dir,0777)
openFile(app_dir)
def createCytoscapeDesktop(cytoscape_dir):
cyto_ds_output = cytoscape_dir+'/Cytoscape.desktop'
data = export.ExportFile(cyto_ds_output)
cytoscape_desktop = cytoscape_dir+'/Cytoscape'; #cytoscape_desktop = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/Cytoscape'
cytoscape_png = cytoscape_dir+ '/.install4j/Cytoscape.png'; #cytoscape_png = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/.install4j/Cytoscape.png'
data.write('[Desktop Entry]'+'\n')
data.write('Type=Application'+'\n')
data.write('Name=Cytoscape'+'\n')
data.write('Exec=/bin/sh "'+cytoscape_desktop+'"'+'\n')
data.write('Icon='+cytoscape_png+'\n')
data.write('Categories=Application;'+'\n')
data.close()
########### Parse Input Annotations ###########
def ProbesetCalls(array_type,probeset_class,splice_event,constitutive_call,external_exonid):
include_probeset = 'yes'
if array_type == 'AltMouse':
exonid = splice_event
if filter_probesets_by == 'exon':
            if '-' in exonid or '|' in exonid: ###Therefore the probeset represents an exon-exon junction or multi-exon probeset
include_probeset = 'no'
if filter_probesets_by != 'exon':
if '|' in exonid: include_probeset = 'no'
if constitutive_call == 'yes': include_probeset = 'yes'
else:
if avg_all_for_ss == 'yes' and (probeset_class == 'core' or len(external_exonid)>2): constitutive_call = 'yes'
#if len(splice_event)>2 and constitutive_call == 'yes' and avg_all_for_ss == 'no': constitutive_call = 'no'
if constitutive_call == 'no' and len(splice_event)<2 and len(external_exonid)<2: ###otherwise these are interesting probesets to keep
if filter_probesets_by != 'full':
if filter_probesets_by == 'extended':
if probeset_class == 'full': include_probeset = 'no'
elif filter_probesets_by == 'core':
if probeset_class != 'core': include_probeset = 'no'
return include_probeset,constitutive_call
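### ProbesetCalls returns two flags: whether a probeset should be retained at the requested filter
### level ('core' is the strictest class; 'extended' excludes only 'full' probesets) and whether it
### may be treated as constitutive for gene-level expression estimates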
def EvidenceOfAltSplicing(slicing_annot):
splice_annotations = ["ntron","xon","strangeSplice","Prime","3","5","C-term"]; as_call = 0
splice_annotations2 = ["ntron","assette","strangeSplice","Prime","3","5"]
for annot in splice_annotations:
if annot in slicing_annot: as_call = 1
if as_call == 1:
if "C-term" in slicing_annot and ("N-" in slicing_annot or "Promoter" in slicing_annot):
as_call = 0
for annot in splice_annotations2:
if annot in slicing_annot: as_call = 1
elif "bleed" in slicing_annot and ("N-" in slicing_annot or "Promoter" in slicing_annot):
as_call = 0
for annot in splice_annotations2:
if annot in slicing_annot: as_call = 1
return as_call
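### EvidenceOfAltSplicing returns 1 when the annotation string contains a recognized splicing keyword
### (intron, exon, cassette, 3'/5' prime, etc.) and 0 otherwise; C-terminal or exon-bleed annotations
### paired with N-terminal/promoter terms only count when a second, splicing-specific keyword is also present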
########### Begin Analyses ###########
class SplicingAnnotationData:
def ArrayType(self):
self._array_type = array_type
return self._array_type
def Probeset(self): return self._probeset
def setProbeset(self,probeset): self._probeset = probeset
def ExonID(self): return self._exonid
def setDisplayExonID(self,exonid): self._exonid = exonid
def GeneID(self): return self._geneid
def Symbol(self):
symbol = ''
if self.GeneID() in annotate_db:
y = annotate_db[self.GeneID()]
symbol = y.Symbol()
return symbol
def ExternalGeneID(self): return self._external_gene
def ProbesetType(self):
###e.g. Exon, junction, constitutive(gene)
return self._probeset_type
def GeneStructure(self): return self._block_structure
def SecondaryExonID(self): return self._block_exon_ids
def setSecondaryExonID(self,ids): self._block_exon_ids = ids
def setLocationData(self, chromosome, strand, probeset_start, probeset_stop):
self._chromosome = chromosome; self._strand = strand
self._start = probeset_start; self._stop = probeset_stop
def LocationSummary(self):
location = self.Chromosome()+':'+self.ProbeStart()+'-'+self.ProbeStop()+'('+self.Strand()+')'
return location
def Chromosome(self): return self._chromosome
def Strand(self): return self._strand
def ProbeStart(self): return self._start
def ProbeStop(self): return self._stop
def ProbesetClass(self):
        ###e.g. core, extended, full
return self._probest_class
def ExternalExonIDs(self): return self._external_exonids
def ExternalExonIDList(self):
external_exonid_list = string.split(self.ExternalExonIDs(),'|')
return external_exonid_list
def Constitutive(self): return self._constitutive_status
def setTranscriptCluster(self,secondary_geneid): self._secondary_geneid = secondary_geneid
def setNovelExon(self,novel_exon): self._novel_exon = novel_exon
def NovelExon(self): return self._novel_exon
def SecondaryGeneID(self): return self._secondary_geneid
def setExonRegionID(self,exon_region): self._exon_region = exon_region
def ExonRegionID(self): return self._exon_region
def SplicingEvent(self):
splice_event = self._splicing_event
if len(splice_event)!=0:
if splice_event[0] == '|': splice_event = splice_event[1:]
return splice_event
def SplicingCall(self): return self._splicing_call
def SpliceJunctions(self): return self._splice_junctions
def Delete(self): del self
def Report(self):
output = self.ArrayType() +'|'+ self.ExonID() +'|'+ self.ExternalGeneID()
return output
def __repr__(self): return self.Report()
class AltMouseData(SplicingAnnotationData):
def __init__(self,affygene,exons,ensembl,block_exon_ids,block_structure,probe_type_call):
self._geneid = affygene; self._external_gene = ensembl; self._exonid = exons; self._secondary_geneid = ensembl
self._probeset_type = probe_type_call; self._block_structure = block_structure; self._block_exon_ids = block_exon_ids
self._external_exonids = 'NA';
self._constitutive_status = 'no'
self._splicing_event = ''
self._secondary_geneid = 'NA'
self._exon_region = ''
if self._probeset_type == 'gene': self._constitutive_status = 'yes'
else: self._constitutive_status = 'no'
class AffyExonSTData(SplicingAnnotationData):
def __init__(self,ensembl_gene_id,exon_id,ens_exon_ids, constitutive_call_probeset, exon_region, splicing_event, splice_junctions, splicing_call):
self._geneid = ensembl_gene_id; self._external_gene = ensembl_gene_id; self._exonid = exon_id
self._constitutive_status = constitutive_call_probeset#; self._start = probeset_start; self._stop = probeset_stop
self._external_exonids = ens_exon_ids; #self._secondary_geneid = transcript_cluster_id#; self._chromosome = chromosome; self._strand = strand
self._exon_region=exon_region; self._splicing_event=splicing_event; self._splice_junctions=splice_junctions; self._splicing_call = splicing_call
if self._exonid[0] == 'U': self._probeset_type = 'UTR'
elif self._exonid[0] == 'E': self._probeset_type = 'exonic'
elif self._exonid[0] == 'I': self._probeset_type = 'intronic'
class AffyExonSTDataAbbreviated(SplicingAnnotationData):
def __init__(self,ensembl_gene_id,exon_id,splicing_call):
self._geneid = ensembl_gene_id; self._exonid = exon_id; self._splicing_call = splicing_call
def importSplicingAnnotations(array_type,Species,probeset_type,avg_ss_for_all,root_dir):
global filter_probesets_by; filter_probesets_by = probeset_type
global species; species = Species; global avg_all_for_ss; avg_all_for_ss = avg_ss_for_all; global exon_db; exon_db={}
global summary_data_db; summary_data_db={}; global remove_intronic_junctions; remove_intronic_junctions = 'no'
if array_type == 'RNASeq':
probeset_annotations_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
else: probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
filtered_arrayids={};filter_status='no'
constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
return exon_db, constitutive_probeset_db
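### Usage sketch (argument values are illustrative only):
###   exon_db, constitutive_probeset_db = importSplicingAnnotations('RNASeq','Hs','core','yes',root_dir)
### exon_db maps each probeset/junction ID to a SplicingAnnotationData instance, while
### constitutive_probeset_db maps the IDs used for gene-level (constitutive) expression to their Ensembl gene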
def importSplicingAnnotationDatabase(filename,array_type,filtered_arrayids,filter_status):
begin_time = time.time()
probesets_included_by_new_evidence = 0; export_exon_regions = 'yes'
if 'fake' in array_type: array_type = string.replace(array_type,'-fake',''); original_arraytype = 'RNASeq'
else: original_arraytype = array_type
if filter_status == 'no': global gene_transcript_cluster_db; gene_transcript_cluster_db={}; gene_transcript_cluster_db2={}; global last_exon_region_db; last_exon_region_db = {}
else: new_exon_db={}
fn=filepath(filename)
last_gene = ' '; last_exon_region = ''
constitutive_probeset_db = {}; constitutive_gene = {}
count = 0; x = 0; constitutive_original = {}
#if filter_status == 'yes': exon_db = {}
if array_type == 'AltMouse':
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
probeset,affygene,exons,transcript_num,transcripts,probe_type_call,ensembl,block_exon_ids,block_structure,comparison_info = string.split(probeset_data,'\t')
###note: currently exclude comparison_info since not applicable for existing analyses
if x == 0: x = 1
else:
if exons[-1] == '|': exons = exons[0:-1]
if affygene[-1] == '|': affygene = affygene[0:-1]; constitutive_gene[affygene]=[]
if probe_type_call == 'gene': constitutive_call = 'yes' #looked through the probe annotations and the gene seems to be the most consistent constitutive feature
else: constitutive_call = 'no'
include_call,constitutive_call = ProbesetCalls(array_type,'',exons,constitutive_call,'')
if include_call == 'yes':
probe_data = AltMouseData(affygene,exons,ensembl,block_exon_ids,block_structure,probe_type_call) #this used to just have affygene,exon in the values (1/17/05)
exon_db[probeset] = probe_data
if filter_status == 'yes': new_exon_db[probeset] = probe_data
if constitutive_call == 'yes': constitutive_probeset_db[probeset] = affygene
genes_being_analyzed = constitutive_gene
else:
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
if x == 0: x = 1
else:
try: probeset_id, exon_id, ensembl_gene_id, transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_call_probeset, external_exonid, ens_const_exons, exon_region, exon_region_start, exon_region_stop, splicing_event, splice_junctions = string.split(probeset_data,'\t')
except Exception: print probeset_data;force_error
if affy_class == 'free': affy_class = 'full' ### Don't know what the difference is
include_call,constitutive_call = ProbesetCalls(array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid)
#if 'ENSG00000163904:E11.5' in probeset_id: print probeset_data
#print array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid,constitutive_call,include_call;kill
if array_type == 'junction' and '.' not in exon_id: exon_id = string.replace(exon_id,'-','.'); exon_region = string.replace(exon_region,'-','.')
if ensembl_gene_id != last_gene: new_gene = 'yes'
else: new_gene = 'no'
if filter_status == 'no' and new_gene == 'yes':
if '.' in exon_id: ### Exclude junctions
if '-' not in last_exon_region and 'E' in last_exon_region: last_exon_region_db[last_gene] = last_exon_region
else: last_exon_region_db[last_gene] = last_exon_region
last_gene = ensembl_gene_id
if len(exon_region)>1: last_exon_region = exon_region ### some probeset not linked to an exon region
                ###Record the transcript clusters associated with each gene to annotate the results later on
if constitutive_call_probeset!=constitutive_call: probesets_included_by_new_evidence +=1#; print probeset_id,[splicing_event],[constitutive_call_probeset];kill
proceed = 'no'; as_call = 0
if array_type == 'RNASeq' or array_type == 'junction': include_call = 'yes' ### Constitutive expression is not needed
if remove_intronic_junctions == 'yes':
if 'E' not in exon_id: include_call = 'no' ### Remove junctions that only have splice-sites within an intron or UTR
if include_call == 'yes' or constitutive_call == 'yes':
#if proceed == 'yes':
as_call = EvidenceOfAltSplicing(splicing_event)
if filter_status == 'no':
probe_data = AffyExonSTDataAbbreviated(ensembl_gene_id, exon_id, as_call)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
try:
if export_exon_regions == 'yes':
probe_data.setExonRegionID(exon_region)
except Exception: null=[]
else:
probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, constitutive_call, exon_region, splicing_event, splice_junctions, as_call)
probe_data.setLocationData(chromosome, strand, probeset_start, probeset_stop)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
else:
probe_data.setNovelExon(affy_class)
if filter_status == 'yes':
try: ### saves memory
null = filtered_arrayids[probeset_id]
new_exon_db[probeset_id] = probe_data
except KeyError: null = []
else: exon_db[probeset_id] = probe_data
if constitutive_call == 'yes' and filter_status == 'no': ###only perform function when initially running
constitutive_probeset_db[probeset_id] = ensembl_gene_id
try: constitutive_gene[ensembl_gene_id].append(probeset_id)
except Exception: constitutive_gene[ensembl_gene_id] = [probeset_id]
###Only consider transcript clusters that make up the constitutive portion of the gene or that are alternatively regulated
if array_type != 'RNASeq':
try: gene_transcript_cluster_db[ensembl_gene_id].append(transcript_cluster_id)
except KeyError: gene_transcript_cluster_db[ensembl_gene_id] = [transcript_cluster_id]
if constitutive_call_probeset == 'yes' and filter_status == 'no': ###only perform function when initially running
try: constitutive_original[ensembl_gene_id].append(probeset_id)
except KeyError: constitutive_original[ensembl_gene_id] = [probeset_id]
if array_type != 'RNASeq':
try: gene_transcript_cluster_db2[ensembl_gene_id].append(transcript_cluster_id)
except KeyError: gene_transcript_cluster_db2[ensembl_gene_id] = [transcript_cluster_id]
    ###If a gene has no constitutive probesets as a result of additional filtering (removing all probesets associated with a splice event), add these back
original_probesets_add = 0; genes_being_analyzed = {}
for gene in constitutive_gene: genes_being_analyzed[gene]=[]
for gene in constitutive_original:
if gene not in constitutive_gene:
genes_being_analyzed[gene] = [gene]
constitutive_gene[gene]=[]
original_probesets_add +=1
gene_transcript_cluster_db[gene] = gene_transcript_cluster_db2[gene]
for probeset in constitutive_original[gene]: constitutive_probeset_db[probeset] = gene
#if array_type == 'junction' or array_type == 'RNASeq':
### Added the below in 1.16!!!
### If no constitutive probesets for a gene assigned, assign all gene probesets
for probeset in exon_db:
gene = exon_db[probeset].GeneID()
proceed = 'no'
exonid = exon_db[probeset].ExonID()
### Rather than add all probesets, still filter based on whether the probeset is in an annotated exon
if 'E' in exonid and 'I' not in exonid and '_' not in exonid: proceed = 'yes'
if proceed == 'yes':
if gene not in constitutive_gene:
constitutive_probeset_db[probeset] = gene
genes_being_analyzed[gene] = [gene]
### DO NOT ADD TO constitutive_gene SINCE WE WANT ALL mRNA ALIGNING EXONS/JUNCTIONS TO BE ADDED!!!!
#constitutive_gene[gene]=[]
gene_transcript_cluster_db = eliminate_redundant_dict_values(gene_transcript_cluster_db)
#if affygene == 'ENSMUSG00000023089': print [abs(fold_change_log)],[log_fold_cutoff];kill
if array_type == 'RNASeq':
import RNASeq
try: last_exon_region_db = RNASeq.importExonAnnotations(species,'distal-exon','')
except Exception: null=[]
constitutive_original=[]; constitutive_gene=[]
#clearObjectsFromMemory(exon_db); constitutive_probeset_db=[];genes_being_analyzed=[] ### used to evaluate how much memory objects are taking up
#print 'remove_intronic_junctions:',remove_intronic_junctions
#print constitutive_gene['ENSMUSG00000031170'];kill ### Determine if avg_ss_for_all is working
if original_arraytype == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(exon_db),id_name,'stored as instances of SplicingAnnotationData in memory'
    #print len(constitutive_probeset_db),'array IDs stored as constitutive'
#print probesets_included_by_new_evidence, 'array IDs were re-annotated as NOT constitutive based on mRNA evidence'
if array_type != 'AltMouse': print original_probesets_add, 'genes not viewed as constitutive as a result of filtering',id_name,'based on splicing evidence, added back'
end_time = time.time(); time_diff = int(end_time-begin_time)
#print filename,"import finished in %d seconds" % time_diff
if filter_status == 'yes': return new_exon_db
else:
summary_data_db['gene_assayed'] = len(genes_being_analyzed)
try: exportDenominatorGenes(genes_being_analyzed)
except Exception: null=[]
return constitutive_probeset_db,exon_db,genes_being_analyzed
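### Note on return values: with filter_status=='yes' only the filtered annotation dictionary (new_exon_db)
### is returned; otherwise the constitutive probeset map, the complete exon_db and the set of genes being
### analyzed are returned and the GO-Elite denominator file is exported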
def exportDenominatorGenes(genes_being_analyzed):
goelite_output = root_dir+'GO-Elite/denominator/AS.denominator.txt'
goelite_data = export.ExportFile(goelite_output)
systemcode = 'En'
goelite_data.write("GeneID\tSystemCode\n")
for gene in genes_being_analyzed:
if array_type == 'AltMouse':
try: gene = annotate_db[gene].ExternalGeneID()
except KeyError: null = []
goelite_data.write(gene+'\t'+systemcode+'\n')
try: goelite_data.close()
except Exception: null=[]
def performExpressionAnalysis(filename,constitutive_probeset_db,exon_db,annotate_db,dataset_name):
#if analysis_method == 'splicing-index': returnLargeGlobalVars();kill ### used to ensure all large global vars from the reciprocal junction analysis have been cleared from memory
#returnLargeGlobalVars()
"""import list of expression values for arrayids and calculates statistics"""
global fold_dbase; global original_conditions; global normalization_method
stats_dbase = {}; fold_dbase={}; ex_db={}; si_db=[]; bad_row_import = {}; count=0
global array_group_name_db; array_group_name_db = {}
global array_group_db; array_group_db = {};
global array_raw_group_values; array_raw_group_values = {}; global original_array_names; original_array_names=[]
global max_replicates; global equal_replicates; global array_group_list
array_index_list = [] ###Use this list for permutation analysis
fn=filepath(filename); line_num = 1
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); t = string.split(data,'\t'); probeset = t[0]
if t[0]== '#': null=[] ### Don't import line
elif line_num == 1:
line_num += 1 #makes this value null for the next loop of actual array data
            ###Below occurs if the data is raw as opposed to precomputed
if ':' in t[1]:
array_group_list = []; x=0 ###gives us an original index value for each entry in the group
for entry in t[1:]:
original_array_names.append(entry)
aa = string.split(entry,':')
try: array_group,array_name = aa
except Exception: array_name = string.join(aa[1:],':'); array_group = aa[0]
try:
array_group_db[array_group].append(x)
array_group_name_db[array_group].append(array_name)
except KeyError:
array_group_db[array_group] = [x]
array_group_name_db[array_group] = [array_name]
### below only occurs with a new group addition
array_group_list.append(array_group) #use this to generate comparisons in the below linked function
x += 1
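                ### The header row is therefore expected to look like (illustrative):
                ###   UID <tab> control:sample1 <tab> control:sample2 <tab> treated:sample3 ...
                ### where the text before ':' names the biological group and the remainder names the sample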
else:
#try: print data_type
#except Exception,exception:
#print exception
#print traceback.format_exc()
                print_out = 'The AltAnalyze filtered expression file "'+filename+'" is not properly formatted.\n Review formatting requirements if this file was created by another application.\n'
print_out += "\nFirst line\n"+line
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
else:
#if probeset in exon_db:
#if exon_db[probeset].GeneID() == 'ENSG00000139970':
###Use the index values from above to assign each expression value to a new database
temp_group_array = {}
line_num+=1
for group in array_group_db:
if count == 0: array_index_list.append(array_group_db[group])
for array_index in array_group_db[group]:
try: exp_val = float(t[array_index+1])
except Exception:
if 'Gene_ID' not in line: bad_row_import[probeset]=line; exp_val = 1
###appended is the numerical expression value for each array in the group (temporary array)
try: temp_group_array[group].append(exp_val) #add 1 since probeset is the first column
except KeyError: temp_group_array[group] = [exp_val]
if count == 0: array_index_list.sort(); count = 1
####store the group database within the probeset database entry
try:
null = exon_db[probeset] ###To conserve memory, don't store any probesets not used for downstream analyses (e.g. not linked to mRNAs)
#if 'ENSG00000139970' in probeset:
#print [max_exp]
#print t[1:];kill
#max_exp = max(map(float, t[1:]))
#if len(array_raw_group_values)>10000: break
#if max_exp>math.log(70,2):
array_raw_group_values[probeset] = temp_group_array
except KeyError:
#print probeset
pass
print len(array_raw_group_values), 'sequence identifiers imported out of', line_num-1
if len(bad_row_import)>0:
print len(bad_row_import), "Rows with an unexplained import error processed and deleted."
print "Example row:"; x=0
for i in bad_row_import:
if x==0: print bad_row_import[i]
try: del array_raw_group_values[i]
except Exception: null=[]
x+=1
### If no gene expression reporting probesets were imported, update constitutive_probeset_db to include all mRNA aligning probesets
cs_genedb={}; missing_genedb={}; addback_genedb={}; rnaseq_cs_gene_db={}
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [probeset]
try:
null=array_raw_group_values[probeset]; cs_genedb[gene]=[]
if gene == probeset: rnaseq_cs_gene_db[gene]=[] ### If RPKM normalization used, use the gene expression values already calculated
        except Exception: missing_genedb[gene]=[] ### Collect genes possibly missing from the constitutive database (verified next)
for gene in missing_genedb:
try: null=cs_genedb[gene]
except Exception: addback_genedb[gene]=[]
for probeset in array_raw_group_values:
try:
gene = exon_db[probeset].GeneID()
try:
null=addback_genedb[gene]
if 'I' not in probeset and 'U' not in probeset: ### No intron or UTR containing should be used for constitutive expression
null=string.split(probeset,':')
if len(null)<3: ### No trans-gene junctions should be used for constitutive expression
constitutive_probeset_db[probeset]=gene
except Exception: null=[]
except Exception: null=[]
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [[probeset]]
### Only examine values for associated exons when determining RNASeq constitutive expression (when exon data is present)
normalization_method = 'raw'
if array_type == 'RNASeq':
junction_count=0; constitutive_probeset_db2={}
for uid in constitutive_probeset_db:
if '-' in uid: junction_count+=1
if len(rnaseq_cs_gene_db)>0: ### If filtered RPKM gene-level expression data present, use this instead (and only this)
normalization_method = 'RPKM'
constitutive_probeset_db={} ### Re-set this database
for gene in rnaseq_cs_gene_db:
constitutive_probeset_db[gene]=gene
elif junction_count !=0 and len(constitutive_probeset_db) != junction_count:
### occurs when there is a mix of junction and exon IDs
for uid in constitutive_probeset_db:
if '-' not in uid: constitutive_probeset_db2[uid] = constitutive_probeset_db[uid]
constitutive_probeset_db = constitutive_probeset_db2; constitutive_probeset_db2=[]
"""
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
if gene == 'ENSG00000185008': print [probeset]
"""
###Build all putative splicing events
global alt_junction_db; global exon_dbase; global critical_exon_db; critical_exon_db={}
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
### Applies to reciprocal junction analyses only
if array_type == 'AltMouse':
alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db = ExonAnnotate_module.identifyPutativeSpliceEvents(exon_db,constitutive_probeset_db,array_raw_group_values,agglomerate_inclusion_probesets,onlyAnalyzeJunctions)
print 'Number of Genes with Examined Splice Events:',len(alt_junction_db)
elif (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
import JunctionArray
alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db = JunctionArray.getPutativeSpliceEvents(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir)
print 'Number of Genes with Examined Splice Events:',len(alt_junction_db)
#alt_junction_db=[]; critical_exon_db=[]; exon_dbase=[]; exon_inclusion_db=[]
if agglomerate_inclusion_probesets == 'yes':
array_raw_group_values = agglomerateInclusionProbesets(array_raw_group_values,exon_inclusion_db)
exon_inclusion_db=[]
### For datasets with high memory requirements (RNASeq), filter the current and new databases
### Begin this function after agglomeration to ensure agglomerated probesets are considered
reciprocal_probesets = {}
if array_type == 'junction' or array_type == 'RNASeq':
for affygene in alt_junction_db:
for event in alt_junction_db[affygene]:
reciprocal_probesets[event.InclusionProbeset()]=[]
reciprocal_probesets[event.ExclusionProbeset()]=[]
not_evalutated={}
for probeset in array_raw_group_values:
try: null=reciprocal_probesets[probeset]
except Exception:
### Don't remove constitutive probesets
try: null=constitutive_probeset_db[probeset]
except Exception: not_evalutated[probeset]=[]
        #print 'Removing',len(not_evalutated),'exon/junction IDs not evaluated for splicing'
for probeset in not_evalutated:
del array_raw_group_values[probeset]
###Check to see if we have precomputed expression data or raw to be analyzed
x=0; y=0; array_raw_group_values2={}; probesets_to_delete=[] ### Record deleted probesets
if len(array_raw_group_values)==0:
print_out = "No genes were considered 'Expressed' based on your input options. Check to make sure that the right species database is indicated and that the right data format has been selected (e.g., non-log versus log expression)."
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()
elif len(array_raw_group_values)>0:
###array_group_list should already be unique and correctly sorted (see above)
for probeset in array_raw_group_values:
data_lists=[]
for group_name in array_group_list:
data_list = array_raw_group_values[probeset][group_name] ###nested database entry access - baseline expression
if global_addition_factor > 0: data_list = addGlobalFudgeFactor(data_list,'log')
data_lists.append(data_list)
if len(array_group_list)==2:
data_list1 = data_lists[0]; data_list2 = data_lists[-1]; avg1 = statistics.avg(data_list1); avg2 = statistics.avg(data_list2)
log_fold = avg2 - avg1
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df) #Excel doesn't recognize fractions in a DF
#p = statistics.t_probability(t,df)
p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
if p == -1:
if len(data_list1)>1 and len(data_list2)>1:
print_out = "The probability statistic selected ("+probability_statistic+") is not compatible with the\nexperimental design. Please consider an alternative statistic or correct the problem.\nExiting AltAnalyze."
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()
else: p = 1
except Exception: p = 1
fold_dbase[probeset] = [0]; fold_dbase[probeset].append(log_fold)
stats_dbase[probeset]=[avg1]; stats_dbase[probeset].append(p)
###replace entries with the two lists for later permutation analysis
                if p == -1: ### should be p == 1: Not sure why this filter was here, but mistakenly removes probesets where there is just one array for each group
del fold_dbase[probeset];del stats_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
if x == 1: print 'Bad data detected...', data_list1, data_list2
                elif (avg1 < expression_threshold and avg2 < expression_threshold and p > p_threshold) and array_type != 'RNASeq': ### Inserted a filtering option to exclude small variance, low expression probesets
del fold_dbase[probeset];del stats_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
else: array_raw_group_values2[probeset] = [data_list1,data_list2]
else: ###Non-junction analysis can handle more than 2 groups
index=0
for data_list in data_lists:
try: array_raw_group_values2[probeset].append(data_list)
except KeyError: array_raw_group_values2[probeset] = [data_list]
if len(array_group_list)>2: ### Thus, there is some variance for this probeset
### Create a complete stats_dbase containing all fold changes
if index==0:
avg_baseline = statistics.avg(data_list); stats_dbase[probeset] = [avg_baseline]
else:
avg_exp = statistics.avg(data_list)
log_fold = avg_exp - avg_baseline
try: fold_dbase[probeset].append(log_fold)
except KeyError: fold_dbase[probeset] = [0,log_fold]
index+=1
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
array_raw_group_values = array_raw_group_values2; array_raw_group_values2=[]
print x, id_name,"excluded prior to analysis... predicted not detected"
global original_avg_const_exp_db; global original_fold_dbase
global avg_const_exp_db; global permute_lists; global midas_db
if len(array_raw_group_values)>0:
adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db, constitutive_fold_change, original_avg_const_exp_db = constitutive_exp_normalization(fold_dbase,stats_dbase,exon_db,constitutive_probeset_db)
stats_dbase=[] ### No longer needed after this point
original_fold_dbase = fold_dbase; avg_const_exp_db = {}; permute_lists = []; y = 0; original_conditions = conditions; max_replicates,equal_replicates = maxReplicates()
gene_expression_diff_db = constitutive_expression_changes(constitutive_fold_change,annotate_db) ###Add in constitutive fold change filter to assess gene expression for ASPIRE
while conditions > y:
avg_const_exp_db = constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db); y+=1
#print len(avg_const_exp_db),constitutive_gene_db['ENSMUSG00000054850']
###Export Analysis Results for external splicing analysis (e.g. MiDAS format)
if run_MiDAS == 'yes' and normalization_method != 'RPKM': ### RPKM has negative values which will crash MiDAS
status = ResultsExport_module.exportTransitResults(array_group_list,array_raw_group_values,array_group_name_db,avg_const_exp_db,adj_fold_dbase,exon_db,dataset_name,apt_location)
print "Finished exporting input data for MiDAS analysis"
try: midas_db = ResultsExport_module.importMidasOutput(dataset_name)
except Exception: midas_db = {} ### Occurs if there are not enough samples to calculate a MiDAS p-value
else: midas_db = {}
###Provides all pairwise permuted group comparisons
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
permute_lists = statistics.permute_arrays(array_index_list)
### Now remove probesets from the analysis that were used to evaluate gene expression
for probeset in constitutive_probeset_db:
try: null = reciprocal_probesets[probeset]
except Exception:
try: del array_raw_group_values[probeset]
except Exception: null=[]
not_evalutated=[]; reciprocal_probesets=[]
constitutive_probeset_db=[]
    ### Above, all conditions were examined when more than 2 are present... change this so that only the most extreme are analyzed further
if len(array_group_list)>2 and analysis_method == 'splicing-index' and (array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null'): ### USED FOR MULTIPLE COMPARISONS
print 'Calculating splicing-index values for multiple group comparisons (please be patient)...',
"""
if len(midas_db)==0:
print_out = 'Warning!!! MiDAS failed to run for multiple groups. Please make\nsure there are biological replicates present for your groups.\nAltAnalyze requires replicates for multi-group (more than two) analyses.'
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()"""
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del nonlog_NI_db[probeset]
except KeyError: null=[]
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
print "Exporting all normalized intensities to:\n"+summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['Gene\tExonID\tprobesetID']+original_array_names,'\t')+'\n'; adjoutput.write(title)
        ### Pick which data lists have the most extreme values using the NI_dbase (adjusted folds for each condition)
original_increment = int(len(nonlog_NI_db)/20); increment = original_increment; interaction = 0
for probeset in nonlog_NI_db:
if interaction == increment: increment+=original_increment; print '*',
interaction +=1
geneid = exon_db[probeset].GeneID(); ed = exon_db[probeset]
index=0; NI_list=[] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset]:
NI_list.append((NI,index)); index+=1 ### setup to sort for the extreeme adj folds and get associated group_name using the index
raw_exp_vals = array_raw_group_values[probeset]
adj_exp_lists={} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
k=0; gi=0; adj_exp_vals = []
for exp_list in raw_exp_vals:
for exp in exp_list:
adj_exp_val = exp-avg_const_exp_db[geneid][k]
try: adj_exp_lists[gi].append(adj_exp_val)
except Exception: adj_exp_lists[gi] = [adj_exp_val]
if export_NI_values == 'yes': adj_exp_vals.append(str(adj_exp_val))
k+=1
gi+=1
if export_NI_values == 'yes':
#print geneid+'-'+probeset, adj_exp_val, [ed.ExonID()];kill
if export_exon_regions == 'yes':
try: ### Thid will only work if ExonRegionID is stored in the abreviated AffyExonSTData object - useful in comparing results between arrays (exon-region centric)
if (array_type == 'exon' or array_type == 'gene') or '-' not in ed.ExonID(): ### only include exon entries not junctions
exon_regions = string.split(ed.ExonRegionID(),'|')
for er in exon_regions:
if len(er)>0: er = er
else:
try: er = ed.ExonID()
except Exception: er = 'NA'
ev = string.join([geneid+'\t'+er+'\t'+probeset]+adj_exp_vals,'\t')+'\n'
if len(filtered_probeset_db)>0:
if probeset in filtered_probeset_db: adjoutput.write(ev) ### This is used when we want to restrict to only probesets known to already by changed
else: adjoutput.write(ev)
except Exception:
ev = string.join([geneid+'\t'+'NA'+'\t'+probeset]+adj_exp_vals,'\t')+'\n'; adjoutput.write(ev)
NI_list.sort()
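            ### Scan NI pairs from the most divergent values inward: keep the most extreme pair whose
            ### gene-level (constitutive) fold change is below log_fold_cutoff, so that apparent splicing
            ### differences are not driven by overall gene expression; if even the most extreme pair falls
            ### below alt_exon_logfold_cutoff, a zero-variance NI_list is returned for this probeset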
examine_pairwise_comparisons = 'yes'
if examine_pairwise_comparisons == 'yes':
k1=0; k2=0; filtered_NI_comps = []
NI_list_rev = list(NI_list); NI_list_rev.reverse()
NI1,index1 = NI_list[k1]; NI2,index2 = NI_list_rev[k2]; abs_SI = abs(math.log(NI1/NI2,2))
if abs_SI<alt_exon_logfold_cutoff:
### Indicates that no valid matches were identified - hence, exit loop and return an NI_list with no variance
NI_list = [NI_list[0],NI_list[0]]
else:
                    ### The most extreme NI pair passes the splicing cutoff - keep it only if the gene-level (constitutive) fold change is below log_fold_cutoff; otherwise scan all pairs below for one that is
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
#print 'original',abs_SI,k1,k2, ge_fold, constit_exp1, constit_exp2
if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
else:
for i1 in NI_list:
k2=0
for i2 in NI_list_rev:
NI1,index1 = i1; NI2,index2 = i2; abs_SI = abs(math.log(NI1/NI2,2))
#constit_exp1 = original_avg_const_exp_db[geneid][index1]
#constit_exp2 = original_avg_const_exp_db[geneid][index2]
#ge_fold = constit_exp2-constit_exp1
#if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
#print k1,k2, i1, i2, abs_SI, abs(ge_fold), log_fold_cutoff, alt_exon_logfold_cutoff
if abs_SI<alt_exon_logfold_cutoff: break
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
if abs(ge_fold) < log_fold_cutoff:
filtered_NI_comps.append([abs_SI,k1,k2])
#if k1 == 49 or k1 == 50 or k1 == 51: print probeset, abs_SI, k1, k2, abs(ge_fold),log_fold_cutoff, index1, index2, NI1, NI2, constit_exp1,constit_exp2
k2+=1
k1+=1
if len(filtered_NI_comps)>0:
#print filtered_NI_comps
#print NI_list_rev
#print probeset,geneid
#print len(filtered_NI_comps)
#print original_avg_const_exp_db[geneid]
filtered_NI_comps.sort()
si,k1,k2 = filtered_NI_comps[-1]
NI_list = [NI_list[k1],NI_list_rev[k2]]
"""
NI1,index1 = NI_list[0]; NI2,index2 = NI_list[-1]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
print probeset, si, ge_fold, NI_list"""
#print k1,k2;sys.exit()
index1 = NI_list[0][1]; index2 = NI_list[-1][1]
nonlog_NI_db[probeset] = [NI_list[0][0],NI_list[-1][0]] ### Update the values of this dictionary
data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
avg1 = statistics.avg(data_list1); avg2 = statistics.avg(data_list2); log_fold = avg2 - avg1
group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df); ttest_exp_p = statistics.t_probability(t,df)
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 1
fold_dbase[probeset] = [0]; fold_dbase[probeset].append(log_fold)
if ttest_exp_p == -1: del fold_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
            elif avg1 < expression_threshold and avg2 < expression_threshold and (ttest_exp_p > p_threshold and ttest_exp_p != 1): ### Inserted a filtering option to exclude small variance, low expression probesets
del fold_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
normInt1 = (avg1-constit_exp1); normInt2 = (avg2-constit_exp2)
adj_fold = normInt2 - normInt1
splicing_index = -1*adj_fold; abs_splicing_index = abs(splicing_index)
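                ### The splicing-index is the (sign-flipped) difference in constitutive-normalized intensities
                ### between the two most extreme groups:
                ###   SI = -[(avg2 - constit_exp2) - (avg1 - constit_exp1)]
                ### a log2 measure of exon inclusion change that is independent of overall gene-expression change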
#print probeset, splicing_index, ge_fold, index1, index2
#normIntList1 = adj_exp_lists[index1]; normIntList2 = adj_exp_lists[index2]
all_nI=[]
for g_index in adj_exp_lists: all_nI.append(adj_exp_lists[g_index])
try: normIntensityP = statistics.OneWayANOVA(all_nI) #[normIntList1,normIntList2] ### This stays an ANOVA independent of the algorithm choosen since groups number > 2
except Exception: normIntensityP = 'NA'
if (normInt1*normInt2)<0: opposite_SI_log_mean = 'yes'
else: opposite_SI_log_mean = 'no'
abs_log_ratio = abs(ge_fold)
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError: midas_p = 'NA'
else: midas_p = 'NA'
#if 'ENSG00000059588' in geneid: print probeset, splicing_index, constit_exp1, constit_exp2, ge_fold,group_name2+'_vs_'+group_name1, index1, index2
if abs_splicing_index>alt_exon_logfold_cutoff and (midas_p < p_threshold or midas_p == 'NA'): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
ped = ProbesetExpressionData(avg1, avg2, log_fold, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,normInt1,normInt2,normIntensityP,opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1); sid.setConstitutiveFold(ge_fold); sid.setProbesetExpressionData(ped)
si_db.append((splicing_index,sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index,geneid,normIntensityP)
ex_db[probeset] = eed
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(si_db),id_name,"with evidence of Alternative expression"
original_fold_dbase = fold_dbase; si_db.sort()
summary_data_db['denominator_exp_events']=len(nonlog_NI_db)
del avg_const_exp_db; del gene_db; del constitutive_gene_db; gene_expression_diff_db={}
if export_NI_values == 'yes': adjoutput.close()
    ### Above, all conditions were examined when more than 2 are present... change this so that only the most extreme are analyzed further
elif len(array_group_list)>2 and (array_type == 'junction' or array_type == 'RNASeq' or array_type == 'AltMouse'): ### USED FOR MULTIPLE COMPARISONS
excluded_probeset_db={}
group_sizes = []; original_array_indices = permute_lists[0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices: group_sizes.append(len(group))
if analysis_method == 'linearregres': ### For linear regression, these scores are non-long
original_array_raw_group_values = copy.deepcopy(array_raw_group_values)
for probeset in array_raw_group_values:
ls_concatenated=[]
for group in array_raw_group_values[probeset]: ls_concatenated+=group
ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
pos1=0; pos2=0; positions=[]
for group in group_sizes:
if pos1 == 0: pos2 = group; positions.append((pos1,pos2))
else: pos2 = pos1+group; positions.append((pos1,pos2))
pos1 = pos2
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
print "Exporting all normalized intensities to:\n"+summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['gene\tprobesets\tExonRegion']+original_array_names,'\t')+'\n'; adjoutput.write(title)
events_examined= 0; denominator_events=0; fold_dbase=[]; adj_fold_dbase=[]; scores_examined=0
splice_event_list=[]; splice_event_list_mx=[]; splice_event_list_non_mx=[]; event_mx_temp = []; permute_p_values={}; probeset_comp_db={}#use this to exclude duplicate mx events
for geneid in alt_junction_db:
affygene = geneid
for event in alt_junction_db[geneid]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1]; exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene,exon_set1]
probeset2 = exon_dbase[affygene,exon_set2]
critical_exon_list = critical_exon_db[affygene,tuple(event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset(); probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction(); exon_set2 = event.ExclusionJunction()
try: novel_event = event.NovelEvent()
except Exception: novel_event = 'known'
critical_exon_list = [1,event.CriticalExonSets()]
key,jd = formatJunctionData([probeset1,probeset2],geneid,critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try: jd.setSymbol(annotate_db[geneid].Symbol())
except Exception: null=[]
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
dI_scores=[]
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db and probeset1 in array_raw_group_values and probeset2 in array_raw_group_values:
events_examined+=1
if analysis_method == 'ASPIRE':
index1=0; NI_list1=[]; NI_list2=[] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset1]: NI_list1.append(NI)
for NI in nonlog_NI_db[probeset2]: NI_list2.append(NI)
for NI1_g1 in NI_list1:
NI2_g1 = NI_list2[index1]; index2=0
for NI1_g2 in NI_list1:
try: NI2_g2 = NI_list2[index2]
except Exception: print index1, index2, NI_list1, NI_list2;kill
if index1 != index2:
b1 = NI1_g1; e1 = NI1_g2
b2 = NI2_g1; e2 = NI2_g2
try:
dI = statistics.aspire_stringent(b1,e1,b2,e2); Rin = b1/e1; Rex = b2/e2
if (Rin>1 and Rex<1) or (Rin<1 and Rex>1):
if dI<0: i1,i2 = index2,index1 ### all scores should indicate upregulation
else: i1,i2=index1,index2
dI_scores.append((abs(dI),i1,i2))
except Exception:
#if array_type != 'RNASeq': ### RNASeq has counts of zero and one that can cause the same result between groups and probesets
#print probeset1, probeset2, b1, e1, b2, e2, index1, index2, events_examined;kill
                                        ### Exception - Occurs for RNA-Seq but can occur for array data under extremely rare circumstances (Rex=Rin even when different b1,e1 and b2,e2 values)
null=[]
index2+=1
index1+=1
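                    ### ASPIRE summary: for every ordered pair of groups the inclusion- and exclusion-junction
                    ### NI ratios (Rin = b1/e1, Rex = b2/e2) must change in opposite directions; dI (from
                    ### statistics.aspire_stringent) scores the magnitude of that reciprocal shift and only the
                    ### pair with the largest |dI| is carried forward below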
dI_scores.sort()
if analysis_method == 'linearregres':
log_fold,i1,i2 = getAllPossibleLinearRegressionScores(probeset1,probeset2,positions,group_sizes)
dI_scores.append((log_fold,i1,i2))
raw_exp_vals1 = original_array_raw_group_values[probeset1]; raw_exp_vals2 = original_array_raw_group_values[probeset2]
else: raw_exp_vals1 = array_raw_group_values[probeset1]; raw_exp_vals2 = array_raw_group_values[probeset2]
adj_exp_lists1={}; adj_exp_lists2={} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
gi=0; l=0; adj_exp_vals = []; anova_test=[]
for exp_list in raw_exp_vals1:
k=0; anova_group=[]
for exp in exp_list:
adj_exp_val1 = exp-avg_const_exp_db[geneid][l]
try: adj_exp_lists1[gi].append(adj_exp_val1)
except Exception: adj_exp_lists1[gi] = [adj_exp_val1]
adj_exp_val2 = raw_exp_vals2[gi][k]-avg_const_exp_db[geneid][l]
try: adj_exp_lists2[gi].append(adj_exp_val2)
except Exception: adj_exp_lists2[gi] = [adj_exp_val2]
anova_group.append(adj_exp_val2-adj_exp_val1)
if export_NI_values == 'yes':
#if analysis_method == 'ASPIRE':
adj_exp_vals.append(str(adj_exp_val2-adj_exp_val1))
### BELOW CODE PRODUCES THE SAME RESULT!!!!
"""folds1 = statistics.log_fold_conversion_fraction([exp])
folds2 = statistics.log_fold_conversion_fraction([raw_exp_vals2[gi][k]])
lr_score = statistics.convert_to_log_fold(statistics.simpleLinRegress(folds1,folds2))
adj_exp_vals.append(str(lr_score))"""
k+=1; l+=0
gi+=1; anova_test.append(anova_group)
if export_NI_values == 'yes':
if export_exon_regions == 'yes':
exon_regions = string.join(critical_exon_list[1],'|')
exon_regions = string.split(exon_regions,'|')
for er in exon_regions:
ev = string.join([geneid+'\t'+probeset1+'-'+probeset2+'\t'+er]+adj_exp_vals,'\t')+'\n'
if len(filtered_probeset_db)>0:
if probeset1 in filtered_probeset_db and probeset2 in filtered_probeset_db:
adjoutput.write(ev) ### This is used when we want to restrict to only probesets known to already by changed
else: adjoutput.write(ev)
try: anovaNIp = statistics.OneWayANOVA(anova_test) ### This stays an ANOVA independent of the algorithm choosen since groups number > 2
except Exception: anovaNIp='NA'
if len(dI_scores)>0 and geneid in avg_const_exp_db:
dI,index1,index2 = dI_scores[-1]; count=0
probesets = [probeset1, probeset2]; index=0
key,jd = formatJunctionData([probeset1,probeset2],affygene,critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try: jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:null=[]
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
if max_replicates >2 or equal_replicates==2: permute_p_values[(probeset1,probeset2)] = [anovaNIp, 'NA', 'NA', 'NA']
index=0
for probeset in probesets:
if analysis_method == 'linearregres':
data_list1 = original_array_raw_group_values[probeset][index1]; data_list2 = original_array_raw_group_values[probeset][index2]
else: data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 'NA'
if ttest_exp_p==1: ttest_exp_p = 'NA'
if index == 0:
try: adj_fold = statistics.avg(adj_exp_lists1[index2]) - statistics.avg(adj_exp_lists1[index1])
except Exception:
print raw_exp_vals1,raw_exp_vals2, avg_const_exp_db[geneid]
print probeset,probesets,adj_exp_lists1,adj_exp_lists2,index1,index2;kill
ped1 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
else:
adj_fold = statistics.avg(adj_exp_lists2[index2]) - statistics.avg(adj_exp_lists2[index1])
ped2 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
index+=1
try:
pp1 = statistics.runComparisonStatistic(adj_exp_lists1[index1], adj_exp_lists1[index2],probability_statistic)
pp2 = statistics.runComparisonStatistic(adj_exp_lists2[index1], adj_exp_lists2[index2],probability_statistic)
except Exception: pp1 = 'NA'; pp2 = 'NA'
if analysis_method == 'ASPIRE' and len(dI_scores)>0:
p1 = JunctionExpressionData(adj_exp_lists1[index1], adj_exp_lists1[index2], pp1, ped1)
p2 = JunctionExpressionData(adj_exp_lists2[index1], adj_exp_lists2[index2], pp2, ped2)
                            ### The ANOVA p-value (anovaNIp) replaces the pairwise p-value previously computed below
"""try: baseline_scores, exp_scores, pairwiseNIp = calculateAllASPIREScores(p1,p2)
except Exception: baseline_scores = [0]; exp_scores=[dI]; pairwiseNIp = 0 """
#if pairwiseNIp == 'NA': pairwiseNIp = 0 ### probably comment out
if len(dI_scores)>0:
scores_examined+=1
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError: midas_p = 'NA'
else: midas_p = 'NA'
if dI>alt_exon_logfold_cutoff and (anovaNIp < p_threshold or perform_permutation_analysis == 'yes' or anovaNIp == 'NA' or anovaNIp == 1): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
#print [dI, probeset1,probeset2, anovaNIp, alt_exon_logfold_cutoff];kill
ejd = ExonJunctionData(dI,probeset1,probeset2,pp1,pp2,'upregulated',event_call,critical_exon_list,affygene,ped1,ped2)
ejd.setConstitutiveFold(ge_fold); ejd.setConstitutiveExpression(constit_exp1)
if array_type == 'RNASeq':
ejd.setNovelEvent(novel_event)
splice_event_list.append((dI,ejd))
else: excluded_probeset_db[affygene+':'+critical_exon_list[1][0]] = probeset1, affygene, dI, 'NA', anovaNIp
statistics.adjustPermuteStats(permute_p_values)
ex_db = splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
original_fold_dbase = fold_dbase; original_avg_const_exp_db=[]; nonlog_NI_db = []; fold_dbase=[]
summary_data_db['denominator_exp_events']=events_examined
del avg_const_exp_db; del gene_db; del constitutive_gene_db; gene_expression_diff_db={}
if export_NI_values == 'yes': adjoutput.close()
print len(splice_event_list), 'alternative exons out of %s exon events examined' % events_examined
fold_dbase=[]; original_fold_dbase=[]; exon_db=[]; constitutive_gene_db=[]; addback_genedb=[]
gene_db=[]; missing_genedb=[]
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return conditions,adj_fold_dbase,nonlog_NI_db,dataset_name,gene_expression_diff_db,midas_db,ex_db,si_db
class ProbesetExpressionData:
def __init__(self, baseline_exp, experimental_exp, fold_change, adj_fold, ttest_raw_exp, annotation):
self.baseline_exp = baseline_exp; self.experimental_exp = experimental_exp
self.fold_change = fold_change; self.adj_fold = adj_fold
self.ttest_raw_exp = ttest_raw_exp; self.annotation = annotation
def BaselineExp(self): return str(self.baseline_exp)
def ExperimentalExp(self): return str(self.experimental_exp)
def FoldChange(self): return str(self.fold_change)
def AdjFold(self): return str(self.adj_fold)
def ExpPval(self): return str(self.ttest_raw_exp)
def Annotation(self): return self.annotation
    def __repr__(self): return self.BaselineExp()+'|'+self.FoldChange()
def agglomerateInclusionProbesets(array_raw_group_values,exon_inclusion_db):
###Combine expression profiles for inclusion probesets that correspond to the same splice event
for excl_probeset in exon_inclusion_db:
inclusion_event_profiles = []
if len(exon_inclusion_db[excl_probeset])>1:
for incl_probeset in exon_inclusion_db[excl_probeset]:
if incl_probeset in array_raw_group_values and excl_probeset in array_raw_group_values:
array_group_values = array_raw_group_values[incl_probeset]
inclusion_event_profiles.append(array_group_values)
#del array_raw_group_values[incl_probeset] ###Remove un-agglomerated original entry
if len(inclusion_event_profiles) > 0: ###Thus, some probesets for this splice event in input file
combined_event_profile = combine_profiles(inclusion_event_profiles)
            ###Combine inclusion probesets into a single ID (identical manner to that in ExonAnnotate_module.identifyPutativeSpliceEvents)
incl_probesets = exon_inclusion_db[excl_probeset]
incl_probesets_str = string.join(incl_probesets,'|')
array_raw_group_values[incl_probesets_str] = combined_event_profile
return array_raw_group_values
def combine_profiles(profile_list):
profile_group_sizes={}
for db in profile_list:
for key in db: profile_group_sizes[key] = len(db[key])
break
new_profile_db={}
for key in profile_group_sizes:
x = profile_group_sizes[key] ###number of elements in list for key
new_val_list=[]; i = 0
while i<x:
temp_val_list=[]
for db in profile_list:
if key in db: val = db[key][i]; temp_val_list.append(val)
i+=1; val_avg = statistics.avg(temp_val_list); new_val_list.append(val_avg)
new_profile_db[key] = new_val_list
return new_profile_db
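### Example of combine_profiles with hypothetical values: averaging {'g1':[1.0,2.0],'g2':[3.0]} and
### {'g1':[3.0,4.0],'g2':[5.0]} element-wise gives {'g1':[2.0,3.0],'g2':[4.0]}, a single expression
### profile representing all inclusion probesets for one splice event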
def constitutive_exp_normalization(fold_db,stats_dbase,exon_db,constitutive_probeset_db):
"""For every expression value, normalize to the expression of the constitutive gene features for that condition,
then store those ratios (probeset_exp/avg_constitutive_exp) and regenerate expression values relative only to the
baseline avg_constitutive_exp, for all conditions, to normalize out gene expression changes"""
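    ### In log2 terms (assuming statistics.log_fold_conversion_fraction is the inverse-log2 transform),
    ### for each probeset and condition x the code below computes:
    ###   exp(x)      = probe_fold_val(x) + baseline_exp                  (rebuilt log2 intensity)
    ###   NI(x)       = 2**exp(x) / 2**avg_const_exp(x)                   (non-log normalized intensity, when factor_out_expression_changes=='yes'; otherwise the baseline constitutive value is used)
    ###   adj_fold(x) = probe_fold_val(x) - (avg_const_exp(x) - avg_const_exp(baseline))
    ### so condition-specific gene-expression changes are factored out before splicing statistics are calculated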
#print "\nParameters:"
#print "Factor_out_expression_changes:",factor_out_expression_changes
#print "Only_include_constitutive_containing_genes:",only_include_constitutive_containing_genes
#print "\nAdjusting probeset average intensity values to factor out condition specific expression changes for optimal splicing descrimination"
gene_db = {}; constitutive_gene_db = {}
### organize everything by gene
for probeset in fold_db: conditions = len(fold_db[probeset]); break
remove_diff_exp_genes = remove_transcriptional_regulated_genes
if conditions > 2: remove_diff_exp_genes = 'no'
for probeset in exon_db:
affygene = exon_db[probeset].GeneID() #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure,comparison_info
if probeset in fold_db:
try: gene_db[affygene].append(probeset)
except KeyError: gene_db[affygene] = [probeset]
if probeset in constitutive_probeset_db and (only_include_constitutive_containing_genes == 'yes' or factor_out_expression_changes == 'no'):
                #the second conditional is used to exclude constitutive data if we wish to use all probesets for
#background normalization rather than just the designated 'gene' probesets.
if probeset in stats_dbase:
try: constitutive_gene_db[affygene].append(probeset)
except KeyError: constitutive_gene_db[affygene] = [probeset]
if len(constitutive_gene_db)>0:
###This is blank when there are no constitutive and the above condition is implemented
gene_db2 = constitutive_gene_db
else: gene_db2 = gene_db
avg_const_exp_db = {}
for affygene in gene_db2:
probeset_list = gene_db2[affygene]
x = 0
while x < conditions:
### average all exp values for constitutive probesets for each condition
exp_list=[]
for probeset in probeset_list:
probe_fold_val = fold_db[probeset][x]
baseline_exp = stats_dbase[probeset][0]
exp_val = probe_fold_val + baseline_exp
exp_list.append(exp_val)
avg_const_exp = statistics.avg(exp_list)
try: avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
adj_fold_dbase={}; nonlog_NI_db={}; constitutive_fold_change={}
    for affygene in avg_const_exp_db: ###If we only wish to include proper constitutive probes, this will ensure we only examine those genes and probesets that are constitutive
probeset_list = gene_db[affygene]
x = 0
while x < conditions:
exp_list=[]
for probeset in probeset_list:
expr_to_subtract = avg_const_exp_db[affygene][x]
baseline_const_exp = avg_const_exp_db[affygene][0]
probe_fold_val = fold_db[probeset][x]
baseline_exp = stats_dbase[probeset][0]
exp_val = probe_fold_val + baseline_exp
exp_val_non_log = statistics.log_fold_conversion_fraction(exp_val)
expr_to_subtract_non_log = statistics.log_fold_conversion_fraction(expr_to_subtract)
baseline_const_exp_non_log = statistics.log_fold_conversion_fraction(baseline_const_exp)
if factor_out_expression_changes == 'yes':
exp_splice_valff = exp_val_non_log/expr_to_subtract_non_log
else: #if no, then we just normalize to the baseline constitutive expression in order to keep gene expression effects (useful if you don't trust constitutive feature expression levels)
exp_splice_valff = exp_val_non_log/baseline_const_exp_non_log
constitutive_fold_diff = expr_to_subtract_non_log/baseline_const_exp_non_log
###To calculate adjusted expression, we need to get the fold change in the constitutive avg (expr_to_subtract/baseline_const_exp) and divide the experimental expression
###By this fold change.
ge_adj_exp_non_log = exp_val_non_log/constitutive_fold_diff #gives a GE adjusted expression
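                ### e.g. a condition constitutive average of 8.5 (log2) vs a baseline of 8.0 gives
                ### constitutive_fold_diff = 2**0.5; an exp_val_non_log of 512 is adjusted to ~362,
                ### i.e. ge_adj_exp ~= 8.5 after the log2 conversion below.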
try: ge_adj_exp = math.log(ge_adj_exp_non_log,2)
except ValueError: print probeset,ge_adj_exp_non_log,constitutive_fold_diff,exp_val_non_log,exp_val,baseline_exp, probe_fold_val, dog
adj_probe_fold_val = ge_adj_exp - baseline_exp
                ### Here we normalize probeset expression to the avg constitutive expression by dividing the probe signal by the avg constitutive probe signal (should be < 1)
                ### referred to as steady-state normalization
if array_type != 'AltMouse' or (probeset not in constitutive_probeset_db):
"""Can't use constitutive gene features since these have no variance for pearson analysis
Python will approximate numbers to a small decimal point range. If the first fold value is
zero, often, zero will be close to but not exactly zero. Correct below """
try:
adj_fold_dbase[probeset].append(adj_probe_fold_val)
except KeyError:
if abs(adj_probe_fold_val - 0) < 0.0000001: #make zero == exactly to zero
adj_probe_fold_val = 0
adj_fold_dbase[probeset] = [adj_probe_fold_val]
try: nonlog_NI_db[probeset].append(exp_splice_valff) ###ratio of junction exp relative to gene expression at that time-point
except KeyError: nonlog_NI_db[probeset] = [exp_splice_valff]
n = 0
#if expr_to_subtract_non_log != baseline_const_exp_non_log: ###otherwise this is the first value in the expression array
if x!=0: ###previous expression can produce errors when multiple group averages have identical values
fold_change = expr_to_subtract_non_log/baseline_const_exp_non_log
fold_change_log = math.log(fold_change,2)
constitutive_fold_change[affygene] = fold_change_log
### If we want to remove any genes from the analysis with large transcriptional changes
### that may lead to false positive splicing calls (different probeset kinetics)
if remove_diff_exp_genes == 'yes':
if abs(fold_change_log) > log_fold_cutoff:
del constitutive_fold_change[affygene]
try: del adj_fold_dbase[probeset]
except KeyError: n = 1
try: del nonlog_NI_db[probeset]
except KeyError: n = 1
"""elif expr_to_subtract_non_log == baseline_const_exp_non_log: ###This doesn't make sense, since n can't equal 1 if the conditional is false (check this code again later 11/23/07)
if n == 1:
del adj_fold_dbase[probeset]
del nonlog_NI_db[probeset]"""
x += 1
print "Intensity normalization complete..."
if factor_out_expression_changes == 'no':
adj_fold_dbase = fold_db #don't change expression values
print len(constitutive_fold_change), "genes undergoing analysis for alternative splicing/transcription"
summary_data_db['denominator_exp_genes']=len(constitutive_fold_change)
"""
mir_gene_count = 0
for gene in constitutive_fold_change:
if gene in gene_microRNA_denom: mir_gene_count+=1
print mir_gene_count, "Genes with predicted microRNA binding sites undergoing analysis for alternative splicing/transcription"
"""
global gene_analyzed; gene_analyzed = len(constitutive_gene_db)
return adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db,constitutive_fold_change, avg_const_exp_db
class TranscriptionData:
def __init__(self, constitutive_fold, rna_processing_annotation):
self._constitutive_fold = constitutive_fold; self._rna_processing_annotation = rna_processing_annotation
def ConstitutiveFold(self): return self._constitutive_fold
def ConstitutiveFoldStr(self): return str(self._constitutive_fold)
def RNAProcessing(self): return self._rna_processing_annotation
    def __repr__(self): return self.ConstitutiveFoldStr()+'|'+self.RNAProcessing()
def constitutive_expression_changes(constitutive_fold_change,annotate_db):
###Add in constitutive fold change filter to assess gene expression for ASPIRE
gene_expression_diff_db = {}
for affygene in constitutive_fold_change:
constitutive_fold = constitutive_fold_change[affygene]; rna_processing_annotation=''
if affygene in annotate_db:
if len(annotate_db[affygene].RNAProcessing()) > 4: rna_processing_annotation = annotate_db[affygene].RNAProcessing()
###Add in evaluation of RNA-processing/binding factor
td = TranscriptionData(constitutive_fold,rna_processing_annotation)
gene_expression_diff_db[affygene] = td
return gene_expression_diff_db
def constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db):
"""normalize expression for raw expression data (only for non-baseline data)"""
#avg_true_const_exp_db[affygene] = [avg_const_exp]
temp_avg_const_exp_db={}
for probeset in array_raw_group_values:
        conditions = len(array_raw_group_values[probeset][y]); break #number of raw expression values to normalize
for affygene in gene_db:
        ###This is blank when there are no constitutive probesets or the above condition is implemented
if affygene in constitutive_gene_db:
probeset_list = constitutive_gene_db[affygene]
z = 1
else: ###so we can analyze splicing independent of gene expression even if no 'gene' feature is present
probeset_list = gene_db[affygene]
z = 0
x = 0
while x < conditions:
            ### average all exp values for constitutive probesets for each condition
exp_list=[]
for probeset in probeset_list:
try: exp_val = array_raw_group_values[probeset][y][x] ### try statement is used for constitutive probes that were deleted due to filtering in performExpressionAnalysis
except KeyError: continue
exp_list.append(exp_val)
try: avg_const_exp = statistics.avg(exp_list)
except Exception: avg_const_exp = 'null'
if only_include_constitutive_containing_genes == 'yes' and avg_const_exp != 'null':
if z == 1:
try: avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
try: temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp]
elif avg_const_exp != 'null': ###***
try: avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
try: temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
if analysis_method == 'ANOVA':
global normalized_raw_exp_ratios; normalized_raw_exp_ratios = {}
for affygene in gene_db:
probeset_list = gene_db[affygene]
            for probeset in probeset_list:
                x = 0
                while x < group_size:
new_ratios = [] ### Calculate expression ratios relative to constitutive expression
exp_val = array_raw_group_values[probeset][y][x]
const_exp_val = temp_avg_const_exp_db[affygene][x]
###Since the above dictionary is agglomerating all constitutive expression values for permutation,
###we need an unbiased way to grab just those relevant const. exp. vals. (hence the temp dictionary)
#non_log_exp_val = statistics.log_fold_conversion_fraction(exp_val)
#non_log_const_exp_val = statistics.log_fold_conversion_fraction(const_exp_val)
#non_log_exp_ratio = non_log_exp_val/non_log_const_exp_val
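                    ### In log2 space the subtraction below equals the commented-out non-log ratio above:
                    ### e.g. exp_val = 9.0 and const_exp_val = 8.5 give log_exp_ratio = 0.5, i.e. log2(512/362.04).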
log_exp_ratio = exp_val - const_exp_val
try: normalized_raw_exp_ratios[probeset].append(log_exp_ratio)
                    except KeyError: normalized_raw_exp_ratios[probeset] = [log_exp_ratio]
                    x += 1
return avg_const_exp_db
######### Z Score Analyses #######
class ZScoreData:
def __init__(self,element,changed,measured,zscore,null_z,gene_symbols):
self._element = element; self._changed = changed; self._measured = measured
self._zscore = zscore; self._null_z = null_z; self._gene_symbols = gene_symbols
def ElementID(self): return self._element
def Changed(self): return str(self._changed)
def Measured(self): return str(self._measured)
def AssociatedWithElement(self): return str(self._gene_symbols)
def ZScore(self): return str(self._zscore)
def SetP(self,p): self._permute_p = p
def PermuteP(self): return str(self._permute_p)
def SetAdjP(self,adjp): self._adj_p = adjp
def AdjP(self): return str(self._adj_p)
def PercentChanged(self):
try: pc = float(self.Changed())/float(self.Measured())*100
except Exception: pc = 0
return str(pc)
def NullZ(self): return self._null_z
def Report(self):
output = self.ElementID()
return output
def __repr__(self): return self.Report()
class FDRStats(ZScoreData):
def __init__(self,p): self._permute_p = p
def AdjP(self): return str(self._adj_p)
def countGenesForElement(permute_input_list,probeset_to_gene,probeset_element_db):
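    ### For one permuted probeset sample, tallies how many unique genes hit each element (domain or miRNA
    ### binding site); the resulting element -> gene-count map supplies the permuted r values consumed by
    ### calculatePermuteZScores below.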
element_gene_db={}
for probeset in permute_input_list:
try:
element_list = probeset_element_db[probeset]
gene = probeset_to_gene[probeset]
for element in element_list:
try: element_gene_db[element].append(gene)
except KeyError: element_gene_db[element] = [gene]
except KeyError: null=[]
### Count the number of unique genes per element
for element in element_gene_db:
t = {}
for i in element_gene_db[element]: t[i]=[]
element_gene_db[element] = len(t)
return element_gene_db
def formatGeneSymbolHits(geneid_list):
symbol_list=[]
for geneid in geneid_list:
symbol = ''
if geneid in annotate_db: symbol = annotate_db[geneid].Symbol()
if len(symbol)<1: symbol = geneid
symbol_list.append(symbol)
symbol_str = string.join(symbol_list,', ')
return symbol_str
def zscore(r,n,N,R):
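    ### Normal approximation to the hypergeometric: the expected hit count is n*(R/N) and the variance is
    ### n*(R/N)*(1-R/N)*((N-n)/(N-1)), which the formula below writes as (1-((n-1)/(N-1))).
    ### Illustrative numbers: r=5 regulated genes in an element with n=20 genes, out of R=100 regulated
    ### genes among N=1000 measured genes -> expected 2.0, sd ~= 1.33, z ~= 2.26.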
z = (r - n*(R/N))/math.sqrt(n*(R/N)*(1-(R/N))*(1-((n-1)/(N-1)))) #z = statistics.zscore(r,n,N,R)
return z
def calculateZScores(hit_count_db,denom_count_db,total_gene_denom_count,total_gene_hit_count,element_type):
N = float(total_gene_denom_count) ###Genes examined
R = float(total_gene_hit_count) ###AS genes
for element in denom_count_db:
element_denom_gene_count = denom_count_db[element]
n = float(element_denom_gene_count) ###all genes associated with element
if element in hit_count_db:
element_hit_gene_count = len(hit_count_db[element])
gene_symbols = formatGeneSymbolHits(hit_count_db[element])
r = float(element_hit_gene_count) ###regulated genes associated with element
else: r = 0; gene_symbols = ''
try: z = zscore(r,n,N,R)
except Exception: z = 0; #print 'error:',element,r,n,N,R; kill
try: null_z = zscore(0,n,N,R)
except Exception: null_z = 0; #print 'error:',element,r,n,N,R; kill
zsd = ZScoreData(element,r,n,z,null_z,gene_symbols)
if element_type == 'domain': original_domain_z_score_data[element] = zsd
elif element_type == 'microRNA': original_microRNA_z_score_data[element] = zsd
permuted_z_scores[element] = [z]
if perform_element_permutation_analysis == 'no':
            ### The below is a more efficient alternative to the permutation statistic
p = FishersExactTest(r,n,R,N)
zsd.SetP(p)
return N,R
######### Begin Permutation Analysis #######
def calculatePermuteZScores(permute_element_inputs,element_denominator_gene_count,N,R):
###Make this code as efficient as possible
for element_input_gene_count in permute_element_inputs:
for element in element_input_gene_count:
r = element_input_gene_count[element]
n = element_denominator_gene_count[element]
try: z = statistics.zscore(r,n,N,R)
except Exception: z = 0
permuted_z_scores[element].append(abs(z))
#if element == '0005488':
#a.append(r)
def calculatePermuteStats(original_element_z_score_data):
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
z = abs(permuted_z_scores[element][0])
permute_scores = permuted_z_scores[element][1:] ###Exclude the true value
nullz = zsd.NullZ()
if abs(nullz) == z: ###Only add the nullz values if they can count towards the p-value (if equal to the original z)
null_z_to_add = permutations - len(permute_scores)
            permute_scores+=[abs(nullz)]*null_z_to_add ###Add null_z values in proportion to the number of permutations in which no genes were found for that element
if len(permute_scores)>0:
p = permute_p(permute_scores,z)
else: p = 1
#if p>1: p=1
zsd.SetP(p)
def FishersExactTest(r,n,R,N):
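    ### 2x2 contingency table: a = regulated genes in the element (r), b = element genes not regulated (n-r),
    ### c = regulated genes outside the element (R-r), d = all remaining genes ((N-R)-(n-r)).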
a = r; b = n-r; c=R-r; d=N-R-b
table = [[int(a),int(b)], [int(c),int(d)]]
    try: ### SciPy version - cuts runtime by roughly a third
oddsratio, pvalue = stats.fisher_exact(table)
return pvalue
except Exception:
ft = fishers_exact_test.FishersExactTest(table)
return ft.two_tail_p()
def adjustPermuteStats(original_element_z_score_data):
    #1. Sort ascending the original input p value vector. Call this spval. Keep the original indices so you can sort back.
#2. Define a new vector called tmp. tmp= spval. tmp will contain the BH p values.
#3. m is the length of tmp (also spval)
#4. i=m-1
#5 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1)) - second to last, last, last/second to last
#6. i=m-2
#7 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1))
#8 repeat step 7 for m-3, m-4,... until i=1
#9. sort tmp back to the original order of the input p values.
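    ### A worked pass through the steps above: sorted p-values [0.01, 0.03, 0.04] (m=3) give tmp[2]=0.04;
    ### tmp[1]=min(0.04, 1.5*0.03)=0.04; tmp[0]=min(0.04, 3*0.01)=0.03, i.e. Benjamini-Hochberg adjusted
    ### p-values [0.03, 0.04, 0.04].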
spval=[]
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
p = float(zsd.PermuteP())
spval.append([p,element])
spval.sort(); tmp = spval; m = len(spval); i=m-2; x=0 ###Step 1-4
while i > -1:
tmp[i]=min(tmp[i+1][0], min((float(m)/(i+1))*spval[i][0],1)),tmp[i][1]; i -= 1
for (adjp,element) in tmp:
zsd = original_element_z_score_data[element]
zsd.SetAdjP(adjp)
spval=[]
def permute_p(null_list,true_value):
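    ### Empirical one-sided p-value: the count of permuted scores at least as extreme as the observed one,
    ### divided by the total permutation count x (not len(null_list)); e.g. 12 of 2000 permuted |z| values
    ### >= the observed |z| gives p = 0.006.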
y = 0; z = 0; x = permutations
for value in null_list:
if value >= true_value: y += 1
#if true_value > 8: global a; a = null_list; print true_value,y,x;kill
    return (float(y)/float(x)) ###Multiply probability x2?
######### End Permutation Analysis #######
def exportZScoreData(original_element_z_score_data,element_type):
element_output = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method+'-'+element_type+'-zscores.txt'
data = export.ExportFile(element_output)
headers = [element_type+'-Name','Number Changed','Number Measured','Percent Changed', 'Zscore','PermuteP','AdjP','Changed GeneSymbols']
headers = string.join(headers,'\t')+'\n'
data.write(headers); sort_results=[]
#print "Results for",len(original_element_z_score_data),"elements exported to",element_output
for element in original_element_z_score_data:
zsd=original_element_z_score_data[element]
try: results = [zsd.Changed(), zsd.Measured(), zsd.PercentChanged(), zsd.ZScore(), zsd.PermuteP(), zsd.AdjP(), zsd.AssociatedWithElement()]
except AttributeError: print element,len(permuted_z_scores[element]);kill
results = [element] + results
results = string.join(results,'\t') + '\n'
sort_results.append([float(zsd.PermuteP()),-1/float(zsd.Measured()),results])
sort_results.sort()
for values in sort_results:
results = values[2]
data.write(results)
data.close()
def getInputsForPermutationAnalysis(exon_db):
### Filter fold_dbase, which is the proper denominator
probeset_to_gene = {}; denominator_list = []
for probeset in exon_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
if proceed == 'yes':
gene = exon_db[probeset].GeneID()
probeset_to_gene[probeset] = gene
denominator_list.append(probeset)
return probeset_to_gene,denominator_list
def getJunctionSplicingAnnotations(regulated_exon_junction_db):
filter_status = 'yes'
########### Import critical exon annotation for junctions, build through the exon array analysis pipeline - link back to probesets
filtered_arrayids={}; critical_probeset_annotation_db={}
if array_type == 'RNASeq' and explicit_data_type == 'null':
critical_exon_annotation_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_exons.txt'
elif array_type == 'RNASeq' and explicit_data_type != 'null':
critical_exon_annotation_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
else:
critical_exon_annotation_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_"+array_type+"_probesets.txt"
    critical_exon_annotation_file = getFilteredFilename(critical_exon_annotation_file)
for uid in regulated_exon_junction_db:
gene = regulated_exon_junction_db[uid].GeneID()
critical_exons = regulated_exon_junction_db[uid].CriticalExons()
"""### It appears that each critical exon for junction arrays can be a concatenation of multiple exons, making this unnecessary
if len(critical_exons)>1 and array_type == 'junction':
critical_exons_joined = string.join(critical_exons,'|')
filtered_arrayids[gene+':'+critical_exon].append(uid)"""
for critical_exon in critical_exons:
try:
try: filtered_arrayids[gene+':'+critical_exon].append(uid)
except TypeError: print gene, critical_exon, uid;kill
except KeyError: filtered_arrayids[gene+':'+critical_exon]=[uid]
critical_exon_annotation_db = importSplicingAnnotationDatabase(critical_exon_annotation_file,'exon-fake',filtered_arrayids,filter_status);null=[] ###The file is in exon centric format, so designate array_type as exon
for key in critical_exon_annotation_db:
ced = critical_exon_annotation_db[key]
for junction_probesets in filtered_arrayids[key]:
try: critical_probeset_annotation_db[junction_probesets].append(ced) ###use for splicing and Exon annotations
except KeyError: critical_probeset_annotation_db[junction_probesets] = [ced]
for junction_probesets in critical_probeset_annotation_db:
if len(critical_probeset_annotation_db[junction_probesets])>1: ###Thus multiple exons associated, must combine annotations
exon_ids=[]; external_exonids=[]; exon_regions=[]; splicing_events=[]
for ed in critical_probeset_annotation_db[junction_probesets]:
ensembl_gene_id = ed.GeneID(); transcript_cluster_id = ed.ExternalGeneID()
exon_ids.append(ed.ExonID()); external_exonids.append(ed.ExternalExonIDs()); exon_regions.append(ed.ExonRegionID()); se = string.split(ed.SplicingEvent(),'|')
for i in se: splicing_events.append(i)
splicing_events = unique.unique(splicing_events) ###remove duplicate entries
exon_id = string.join(exon_ids,'|'); external_exonid = string.join(external_exonids,'|'); exon_region = string.join(exon_regions,'|'); splicing_event = string.join(splicing_events,'|')
probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, '', exon_region, splicing_event, '','')
if array_type != 'RNASeq': probe_data.setTranscriptCluster(transcript_cluster_id)
critical_probeset_annotation_db[junction_probesets] = probe_data
else:
critical_probeset_annotation_db[junction_probesets] = critical_probeset_annotation_db[junction_probesets][0]
return critical_probeset_annotation_db
def determineExternalType(external_probeset_db):
external_probeset_db2={}
if 'TC' in external_probeset_db:
temp_index={}; i=0; type = 'JETTA'
        for name in external_probeset_db['TC'][0]: temp_index[name]=i; i+=1
if 'PS:norm_expr_fold_change' in temp_index: NI_fold_index = temp_index['PS:norm_expr_fold_change']
if 'MADS:pv_1over2' in temp_index: MADS_p1_index = temp_index['MADS:pv_1over2']
if 'MADS:pv_2over1' in temp_index: MADS_p2_index = temp_index['MADS:pv_2over1']
        if 'TC:expr_fold_change' in temp_index: TC_fold_index = temp_index['TC:expr_fold_change'] ### not referenced below
if 'PsId' in temp_index: ps_index = temp_index['PsId']
for tc in external_probeset_db:
for list in external_probeset_db[tc]:
try: NI_fold = float(list[NI_fold_index])
except Exception: NI_fold = 1
try: MADSp1 = float(list[MADS_p1_index])
except Exception: MADSp1 = 1
try: MADSp2 = float(list[MADS_p2_index])
                except Exception: MADSp2 = 1
if MADSp1<MADSp2: pval = MADSp1
else: pval = MADSp2
probeset = list[ps_index]
external_probeset_db2[probeset] = NI_fold,pval
else:
type = 'generic'
a = []; b = []
for id in external_probeset_db:
#print external_probeset_db[id]
try: a.append(abs(float(external_probeset_db[id][0][0])))
except Exception: null=[]
try: b.append(abs(float(external_probeset_db[id][0][1])))
except Exception: null=[]
a.sort(); b.sort(); pval_index = None; score_index = None
if len(a)>0:
if max(a) > 1: score_index = 0
else: pval_index = 0
if len(b)>0:
if max(b) > 1: score_index = 1
else: pval_index = 1
for id in external_probeset_db:
if score_index != None: score = external_probeset_db[id][0][score_index]
else: score = 1
if pval_index != None: pval = external_probeset_db[id][0][pval_index]
else: pval = 1
external_probeset_db2[id] = score,pval
return external_probeset_db2, type
def importExternalProbesetData(dataset_dir):
excluded_probeset_db={}; splice_event_list=[]; p_value_call={}; permute_p_values={}; gene_expression_diff_db={}
analyzed_probeset_db = {}
external_probeset_db = importExternalDBList(dataset_dir)
external_probeset_db, ext_type = determineExternalType(external_probeset_db)
for probeset in exon_db: analyzed_probeset_db[probeset] = []
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db)>0:
temp_db={}
for probeset in analyzed_probeset_db: temp_db[probeset]=[]
for probeset in temp_db:
try: filtered_probeset_db[probeset]
except KeyError: del analyzed_probeset_db[probeset]
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del analyzed_probeset_db[probeset]
except KeyError: null=[]
for probeset in analyzed_probeset_db:
ed = exon_db[probeset]; geneid = ed.GeneID()
td = TranscriptionData('',''); gene_expression_diff_db[geneid] = td
if probeset in external_probeset_db:
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
splicing_index,normIntensityP = external_probeset_db[probeset]
            group1_ratios=[]; group2_ratios=[]; exp_log_ratio=''; ttest_exp_p=''; opposite_SI_log_mean='' ### keep the normIntensityP just read from external_probeset_db
sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean)
splice_event_list.append((splicing_index,sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(0,geneid,'NA')
excluded_probeset_db[probeset] = eed
print len(splice_event_list), 'pre-filtered external results imported...\n'
return splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db
def splicingAnalysisAlgorithms(nonlog_NI_db,fold_dbase,dataset_name,gene_expression_diff_db,exon_db,ex_db,si_db,dataset_dir):
protein_exon_feature_db={}; global regulated_exon_junction_db; global critical_exon_annotation_db; global probeset_comp_db; probeset_comp_db={}
if original_conditions == 2: print "Beginning to run", analysis_method, "algorithm on",dataset_name[0:-1],"data"
if run_from_scratch == 'Annotate External Results':
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db = importExternalProbesetData(dataset_dir)
elif analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
original_exon_db = exon_db
if original_conditions > 2:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = ex_db
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values)
else:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = analyzeJunctionSplicing(nonlog_NI_db)
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values)
elif analysis_method == 'splicing-index':
regulated_exon_junction_db = {}
if original_conditions > 2:
excluded_probeset_db = ex_db; splice_event_list = si_db;
clearObjectsFromMemory(ex_db); clearObjectsFromMemory(si_db)
ex_db=[]; si_db=[]; permute_p_values={}; p_value_call=''
else: splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = analyzeSplicingIndex(fold_dbase)
elif analysis_method == 'FIRMA':
regulated_exon_junction_db = {}
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = FIRMAanalysis(fold_dbase)
global permuted_z_scores; permuted_z_scores={}; global original_domain_z_score_data; original_domain_z_score_data={}
global original_microRNA_z_score_data; original_microRNA_z_score_data={}
nonlog_NI_db=[] ### Clear memory of this large dictionary
try: clearObjectsFromMemory(original_avg_const_exp_db); clearObjectsFromMemory(array_raw_group_values)
except Exception: null=[]
try: clearObjectsFromMemory(avg_const_exp_db)
except Exception: null=[]
try: clearObjectsFromMemory(alt_junction_db)
except Exception: null=[]
try: clearObjectsFromMemory(fold_dbase); fold_dbase=[]
except Exception: null=[]
microRNA_full_exon_db,microRNA_count_db,gene_microRNA_denom = ExonAnalyze_module.importmicroRNADataExon(species,array_type,exon_db,microRNA_prediction_method,explicit_data_type,root_dir)
#print "MicroRNA data imported"
if use_direct_domain_alignments_only == 'yes':
protein_ft_db_len,domain_associated_genes = importProbesetAligningDomains(exon_db,'gene')
else: protein_ft_db_len,domain_associated_genes = importProbesetProteinCompDomains(exon_db,'gene','exoncomp')
if perform_element_permutation_analysis == 'yes':
probeset_to_gene,denominator_list = getInputsForPermutationAnalysis(exon_db)
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
exon_gene_array_translation_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_'+array_type+'-exon_probesets.txt'
try: exon_array_translation_db = importGeneric(exon_gene_array_translation_file)
except Exception: exon_array_translation_db={} ### Not present for all species
exon_hits={}; clearObjectsFromMemory(probeset_comp_db); probeset_comp_db=[]
###Run analyses in the ExonAnalyze_module module to assess functional changes
for (score,ed) in splice_event_list:
geneid = ed.GeneID()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
pl = string.split(ed.Probeset1(),'|'); probeset1 = pl[0] ### When agglomerated, this is important
uid = (probeset1,ed.Probeset2())
else: uid = ed.Probeset1()
gene_exon = geneid,uid; exon_hits[gene_exon] = ed
#print probeset1,ed.Probeset1(),ed.Probeset2(),gene_exon,ed.CriticalExons()
dataset_name_original = analysis_method+'-'+dataset_name[8:-1]
global functional_attribute_db; global protein_features
### Possibly Block-out code for DomainGraph export
    ########### Re-import the exon_db for significant entries with full annotations
    exon_db={}; filtered_arrayids={}; filter_status='yes' ###Use this as a means to save memory (import multiple times - only storing the relevant types of information)
for (score,entry) in splice_event_list:
try: probeset = original_exon_db[entry.Probeset1()].Probeset()
except Exception: probeset = entry.Probeset1()
pl = string.split(probeset,'|'); probeset = pl[0]; filtered_arrayids[probeset] = [] ### When agglomerated, this is important
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
try: probeset = entry.Probeset2(); filtered_arrayids[probeset] = []
except AttributeError: null =[] ###occurs when running Splicing
exon_db = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status);null=[] ###replace existing exon_db (probeset_annotations_file should be a global)
###domain_gene_changed_count_db is the number of genes for each domain that are found for regulated probesets
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if use_direct_domain_alignments_only == 'yes':
protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetAligningDomains(regulated_exon_junction_db,'probeset')
else: protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetProteinCompDomains(regulated_exon_junction_db,'probeset','exoncomp')
else:
if use_direct_domain_alignments_only == 'yes':
protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetAligningDomains(exon_db,'probeset')
else: protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetProteinCompDomains(exon_db,'probeset','exoncomp')
filtered_microRNA_exon_db = ExonAnalyze_module.filterMicroRNAProbesetAssociations(microRNA_full_exon_db,exon_hits)
microRNA_full_exon_db=[]
###add microRNA data to functional_attribute_db
microRNA_hit_gene_count_db = {}; all_microRNA_gene_hits={}; microRNA_attribute_db={}; probeset_mirBS_db={}
for (affygene,uid) in filtered_microRNA_exon_db: ###example ('G7091354', 'E20|') [('hsa-miR-130a', 'Pbxip1'), ('hsa-miR-130a', 'Pbxip1'
###3-1-08
miR_list = []
microRNA_symbol_list = filtered_microRNA_exon_db[(affygene,uid)]
for mir_key in microRNA_symbol_list:
microRNA,gene_symbol,miR_seq, miR_sources = mir_key
#if 'ENS' in microRNA: print microRNA; kill ### bug in some miRNA annotations introduced in the build process
specific_microRNA_tuple = (microRNA,'~')
try: microRNA_hit_gene_count_db[microRNA].append(affygene)
except KeyError: microRNA_hit_gene_count_db[microRNA] = [affygene]
###Create a database with the same structure as "protein_exon_feature_db"(below) for over-representation analysis (direction specific), after linking up splice direction data
try: microRNA_attribute_db[(affygene,uid)].append(specific_microRNA_tuple)
except KeyError: microRNA_attribute_db[(affygene,uid)] = [specific_microRNA_tuple]
miR_data = microRNA+':'+miR_sources
miR_list.append(miR_data) ###Add miR information to the record
function_type = ('miR-sequence: ' +'('+miR_data+')'+miR_seq,'~') ###Add miR sequence information to the sequence field of the report
try: functional_attribute_db[(affygene,uid)].append(function_type)
except KeyError: functional_attribute_db[(affygene,uid)]=[function_type]
#print (affygene,uid), [function_type];kill
if perform_element_permutation_analysis == 'yes':
try: probeset_mirBS_db[uid].append(microRNA)
except KeyError: probeset_mirBS_db[uid] = [microRNA]
miR_str = string.join(miR_list,','); miR_str = '('+miR_str+')'
function_type = ('microRNA-target'+miR_str,'~')
try: functional_attribute_db[(affygene,uid)].append(function_type)
except KeyError: functional_attribute_db[(affygene,uid)]=[function_type]
all_microRNA_gene_hits[affygene] = []
###Replace the gene list for each microRNA hit with count data
microRNA_hit_gene_count_db = eliminate_redundant_dict_values(microRNA_hit_gene_count_db)
    ###Combines any additional feature alignment info identified from 'ExonAnalyze_module.characterizeProteinLevelExonChanges' (e.g. from Ensembl or junction-based queries rather than exon-specific) and combines
    ###this with the database of (Gene,Exon)=[(functional element 1,'~'),(functional element 2,'~')] for downstream result file annotations
domain_hit_gene_count_db = {}; all_domain_gene_hits = {}; probeset_domain_db={}
for entry in protein_features:
gene,uid = entry
for data_tuple in protein_features[entry]:
domain,call = data_tuple
try: protein_exon_feature_db[entry].append(data_tuple)
except KeyError: protein_exon_feature_db[entry] = [data_tuple]
try: domain_hit_gene_count_db[domain].append(gene)
except KeyError: domain_hit_gene_count_db[domain] = [gene]
all_domain_gene_hits[gene]=[]
if perform_element_permutation_analysis == 'yes':
try: probeset_domain_db[uid].append(domain)
except KeyError: probeset_domain_db[uid] = [domain]
protein_features=[]; domain_gene_changed_count_db=[]
###Replace the gene list for each microRNA hit with count data
domain_hit_gene_count_db = eliminate_redundant_dict_values(domain_hit_gene_count_db)
############ Perform Element Over-Representation Analysis ############
"""Domain/FT Fishers-Exact test: with "protein_exon_feature_db" (transformed to "domain_hit_gene_count_db") we can analyze over-representation of domain/features WITHOUT taking into account exon-inclusion or exclusion
Do this using: "domain_associated_genes", which contains domain tuple ('Tyr_pkinase', 'IPR001245') as a key and count in unique genes as the value in addition to
Number of genes linked to splice events "regulated" (SI and Midas p<0.05), number of genes with constitutive probesets
MicroRNA Fishers-Exact test: "filtered_microRNA_exon_db" contains gene/exon to microRNA data. For each microRNA, count the representation in spliced genes microRNA (unique gene count - make this from the mentioned file)
Do this using: "microRNA_count_db"""
domain_gene_counts = {} ### Get unique gene counts for each domain
for domain in domain_associated_genes:
domain_gene_counts[domain] = len(domain_associated_genes[domain])
total_microRNA_gene_hit_count = len(all_microRNA_gene_hits)
total_microRNA_gene_denom_count = len(gene_microRNA_denom)
Nm,Rm = calculateZScores(microRNA_hit_gene_count_db,microRNA_count_db,total_microRNA_gene_denom_count,total_microRNA_gene_hit_count,'microRNA')
gene_microRNA_denom =[]
summary_data_db['miRNA_gene_denom'] = total_microRNA_gene_denom_count
summary_data_db['miRNA_gene_hits'] = total_microRNA_gene_hit_count
summary_data_db['alt_events']=len(splice_event_list)
total_domain_gene_hit_count = len(all_domain_gene_hits)
total_domain_gene_denom_count = protein_ft_db_len ###genes connected to domain annotations
Nd,Rd = calculateZScores(domain_hit_gene_count_db,domain_gene_counts,total_domain_gene_denom_count,total_domain_gene_hit_count,'domain')
microRNA_hit_gene_counts={}; gene_to_miR_db={} ### Get unique gene counts for each miR and the converse
for microRNA in microRNA_hit_gene_count_db:
microRNA_hit_gene_counts[microRNA] = len(microRNA_hit_gene_count_db[microRNA])
for gene in microRNA_hit_gene_count_db[microRNA]:
try: gene_to_miR_db[gene].append(microRNA)
except KeyError: gene_to_miR_db[gene] = [microRNA]
gene_to_miR_db = eliminate_redundant_dict_values(gene_to_miR_db)
if perform_element_permutation_analysis == 'yes':
###Begin Domain/microRNA Permute Analysis
input_count = len(splice_event_list) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(permutations/20); increment = original_increment
start_time = time.time(); print 'Permuting the Domain/miRBS analysis %d times' % permutations
x=0; permute_domain_inputs=[]; permute_miR_inputs=[]
while x<permutations:
if x == increment: increment+=original_increment; print '*',
permute_input_list = random.sample(denominator_list,input_count); x+=1
permute_domain_input_gene_counts = countGenesForElement(permute_input_list,probeset_to_gene,probeset_domain_db)
permute_domain_inputs.append(permute_domain_input_gene_counts)
permute_miR_input_gene_counts = countGenesForElement(permute_input_list,probeset_to_gene,probeset_mirBS_db)
permute_miR_inputs.append(permute_miR_input_gene_counts)
calculatePermuteZScores(permute_domain_inputs,domain_gene_counts,Nd,Rd)
calculatePermuteZScores(permute_miR_inputs,microRNA_hit_gene_counts,Nm,Rm)
calculatePermuteStats(original_domain_z_score_data)
calculatePermuteStats(original_microRNA_z_score_data)
adjustPermuteStats(original_domain_z_score_data)
adjustPermuteStats(original_microRNA_z_score_data)
exportZScoreData(original_domain_z_score_data,'ft-domain')
exportZScoreData(original_microRNA_z_score_data,'microRNA')
end_time = time.time(); time_diff = int(end_time-start_time)
print "Enrichment p-values for Domains/miRBS calculated in %d seconds" % time_diff
denominator_list=[]
try: clearObjectsFromMemory(original_microRNA_z_score_data)
except Exception: null=[]
microRNA_hit_gene_count_db={}; microRNA_hit_gene_counts={};
clearObjectsFromMemory(permuted_z_scores); permuted_z_scores=[]; original_domain_z_score_data=[]
if (array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null')) and analysis_method != 'splicing-index':
critical_probeset_annotation_db = getJunctionSplicingAnnotations(regulated_exon_junction_db)
probeset_aligning_db = importProbesetAligningDomains(regulated_exon_junction_db,'perfect_match')
else: probeset_aligning_db = importProbesetAligningDomains(exon_db,'perfect_match')
############ Export exon/junction level results ############
splice_event_db={}; protein_length_list=[]; aspire_gene_results={}
critical_gene_exons={}; unique_exon_event_db={}; comparison_count={}; direct_domain_gene_alignments={}
functional_attribute_db2={}; protein_exon_feature_db2={}; microRNA_exon_feature_db2={}
external_exon_annot={}; gene_exon_region={}; gene_smallest_p={}; gene_splice_event_score={}; alternatively_reg_tc={}
aspire_output = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method+'-exon-inclusion-results.txt'
data = export.ExportFile(aspire_output)
goelite_output = root_dir+'GO-Elite/AltExon/AS.'+ dataset_name + analysis_method+'.txt'
goelite_data = export.ExportFile(goelite_output); gcn=0
#print 'LENGTH OF THE GENE ANNOTATION DATABASE',len(annotate_db)
if array_type != 'AltMouse':
DG_output = root_dir+'AltResults/DomainGraph/' + dataset_name + analysis_method+'-DomainGraph.txt'
DG_data = export.ExportFile(DG_output)
### Write out only the inclusion hits to a subdir
SRFinder_inclusion = root_dir+'GO-Elite/exon/' + dataset_name + analysis_method+'-inclusion.txt'
SRFinder_in_data = export.ExportFile(SRFinder_inclusion)
SRFinder_in_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the exclusion hits to a subdir
SRFinder_exclusion = root_dir+'GO-Elite/exon/' + dataset_name + analysis_method+'-exclusion.txt'
SRFinder_ex_data = export.ExportFile(SRFinder_exclusion)
SRFinder_ex_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the denominator set to a subdir
SRFinder_denom = root_dir+'GO-Elite/exon_denominator/' + species+'-'+array_type+'.txt'
SRFinder_denom_data = export.ExportFile(SRFinder_denom)
SRFinder_denom_data.write('probeset\tSystemCode\n')
ens_version = unique.getCurrentGeneDatabaseVersion()
ProcessedSpliceData_output = string.replace(DG_output,'DomainGraph','ProcessedSpliceData') ### This is the same as the DG export but without converting the probeset IDs for non-exon arrays
ProcessedSpliceData_data = export.ExportFile(ProcessedSpliceData_output)
if ens_version == '':
try:
elite_db_versions = UI.returnDirectoriesNoReplace('/AltDatabase')
if len(elite_db_versions)>0: ens_version = elite_db_versions[0]
except Exception: null=[]
ens_version = string.replace(ens_version,'EnsMart','ENS_')
DG_data.write(ens_version+"\n")
DG_data.write("Probeset\tGeneID\tRegulation call\tSI\tSI p-value\tMiDAS p-value\n")
ProcessedSpliceData_data.write("ExonID(s)\tGeneID\tRegulation call\t"+analysis_method+"\t"+analysis_method+" p-value\tMiDAS p-value\n")
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if perform_permutation_analysis == 'yes': p_value_type = 'permutation-values'
else: p_value_type = 'FDR-'+p_value_call
if array_type == 'AltMouse': gene_name = 'AffyGene'; extra_transcript_annotation = 'block_structure'; extra_exon_annotation = 'splice_event_description'
if array_type == 'junction' or array_type == 'RNASeq':
gene_name = 'Ensembl'; extra_transcript_annotation = 'transcript cluster ID'; extra_exon_annotation = 'distal exon-region-ID'
goelite_data.write("GeneID\tSystemCode\tscore\tp-value\tSymbol\tExonIDs\n")
if array_type == 'RNASeq':
id1='junctionID-1'; id2='junctionID-2'; loc_column='exon/junction locations'
extra_transcript_annotation = 'Known/Novel Feature'
else: id1='probeset1'; id2='probeset2'; loc_column='probeset locations'
title = [gene_name,analysis_method,'symbol','description','exons1','exons2','regulation_call','event_call',id1,'norm-p1',id2,'norm-p2','fold1','fold2']
title +=['adj-fold1' ,'adj-fold2' ,extra_transcript_annotation,'critical_up_exons','critical_down_exons','functional_prediction','uniprot-ens_feature_predictions']
title +=['peptide_predictions','exp1','exp2','ens_overlapping_domains','constitutive_baseline_exp',p_value_call,p_value_type,'permutation-false-positives']
title +=['gene-expression-change', extra_exon_annotation ,'ExternalExonIDs','ExonRegionID','SplicingEvent','ExonAnnotationScore','large_splicing_diff',loc_column]
else:
goelite_data.write("GeneID\tSystemCode\tSI\tSI p-value\tMiDAS p-value\tSymbol\tExonID\n")
if analysis_method == 'splicing-index':
NIpval = 'SI_rawp'; splicing_score = 'Splicing-Index'; lowestp = 'lowest_p (MIDAS or SI)'; AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'SI_adjp'
else:
NIpval = 'FIRMA_rawp'; splicing_score = 'FIRMA_fold'; lowestp = 'lowest_p (MIDAS or FIRMA)'; AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'FIRMA_adjp'
if array_type == 'RNASeq':
id1='junctionID'; pval_column='junction p-value'; loc_column='junction location'
else: id1='probeset'; pval_column='probeset p-value'; loc_column='probeset location'
if array_type == 'RNASeq': secondary_ID_title = 'Known/Novel Feature'
else: secondary_ID_title = 'alternative gene ID'
title= ['Ensembl',splicing_score,'symbol','description','exons','regulation_call',id1,pval_column,lowestp,'midas p-value','fold','adjfold']
title+=['up_exons','down_exons','functional_prediction','uniprot-ens_feature_predictions','peptide_predictions','ens_overlapping_domains','baseline_probeset_exp']
title+=['constitutive_baseline_exp',NIpval,AdjPcolumn,'gene-expression-change']
title+=[secondary_ID_title, 'ensembl exons', 'consitutive exon', 'exon-region-ID', 'exon annotations','distal exon-region-ID',loc_column]
title = string.join(title,'\t') + '\n'
try:
if original_conditions>2: title = string.replace(title,'regulation_call','conditions_compared')
except Exception: null=[]
data.write(title)
### Calculate adjusted normalized intensity p-values
fdr_exon_stats={}
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method:
for (score,entry) in splice_event_list: ### These are all "significant entries"
fds = FDRStats(entry.TTestNormalizedRatios())
fdr_exon_stats[entry.Probeset1()] = fds
for probeset in excluded_probeset_db: ### These are all "non-significant entries"
fds = FDRStats(excluded_probeset_db[probeset].TTestNormalizedRatios())
fdr_exon_stats[probeset] = fds
try: adjustPermuteStats(fdr_exon_stats)
except Exception: null=[]
    ### Calculate the score average and stdev for each gene to later get a Deviation Value
gene_deviation_db={}
for (score,entry) in splice_event_list:
dI = entry.Score(); geneID = entry.GeneID()
try: gene_deviation_db[geneID].append(dI)
except Exception: gene_deviation_db[geneID] = [dI]
for i in excluded_probeset_db:
entry = excluded_probeset_db[i]
try: dI = entry.Score(); geneID = entry.GeneID()
except Exception: geneID = entry[1]; dI = entry[-1]
try: gene_deviation_db[geneID].append(dI)
except Exception: None ### Don't include genes with no hits
for geneID in gene_deviation_db:
try:
avg_dI=statistics.avg(gene_deviation_db[geneID])
stdev_dI=statistics.stdev(gene_deviation_db[geneID])
gene_deviation_db[geneID] = avg_dI,stdev_dI
except Exception:
gene_deviation_db[geneID] = 'NA','NA'
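    ### The per-gene mean and stdev of the dI scores gathered above are passed to deviation() (defined
    ### elsewhere) for each exon at export time; presumably this yields a z-score-like 'Deviation-Value'
    ### of the exon's dI relative to its gene, reported in the AdjPcolumn field.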
event_count = 0
for (score,entry) in splice_event_list:
event_count += 1
dI = entry.Score(); probeset1 = entry.Probeset1(); regulation_call = entry.RegulationCall(); event_call = entry.EventCall();critical_exon_list = entry.CriticalExonTuple()
probeset1_display = probeset1; selected_probeset = probeset1
if agglomerate_inclusion_probesets == 'yes':
if array_type == 'AltMouse':
exons1 = original_exon_db[probeset1].ExonID()
try: probeset1 = original_exon_db[probeset1].Probeset()
except Exception: null=[]
else:
probeset1 = probeset1; exons1 = original_exon_db[probeset1].ExonID()
try: selected_probeset = original_exon_db[probeset1].Probeset()
except Exception: selected_probeset = probeset1
else:
try: exons1 = exon_db[probeset1].ExonID()
except Exception:
print probeset1, len(exon_db)
for i in exon_db: print i; break
kill
critical_probeset_list = [selected_probeset]
affygene = entry.GeneID()
### Calculate deviation value for each exon
avg_dI,stdev_dI = gene_deviation_db[affygene]
try: DV = deviation(dI,avg_dI,stdev_dI) ### Note: the dI values are always in log2 space, independent of platform
except Exception: DV = 'NA'
if affygene in annotate_db: description = annotate_db[affygene].Description(); symbol = annotate_db[affygene].Symbol()
else: description = ''; symbol = ''
ped1 = entry.ProbesetExprData1(); adjfold1 = ped1.AdjFold(); exp1 = ped1.BaselineExp(); fold1 = ped1.FoldChange(); rawp1 = ped1.ExpPval()
### Get Constitutive expression values
        baseline_const_exp = entry.ConstitutiveExpression() ### For multiple group comparisons
#if affygene in gene_expression_diff_db: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try: mean_fold_change = str(entry.ConstitutiveFold()) ### For multi-condition analyses, the gene expression is dependent on the conditions compared
except Exception: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
probeset2 = entry.Probeset2(); exons2 = exon_db[probeset2].ExonID(); rawp1 = str(entry.TTestNormalizedRatios()); rawp2 = str(entry.TTestNormalizedRatios2()); critical_probeset_list.append(probeset2)
ped2 = entry.ProbesetExprData2(); adjfold2 = ped2.AdjFold(); exp2 = ped2.BaselineExp(); fold2 = ped2.FoldChange()
try: location_summary=original_exon_db[selected_probeset].LocationSummary()+'|'+original_exon_db[probeset2].LocationSummary()
except Exception:
try: location_summary=exon_db[selected_probeset].LocationSummary()+'|'+exon_db[probeset2].LocationSummary()
except Exception: location_summary=''
if array_type == 'AltMouse':
extra_transcript_annotation = exon_db[probeset1].GeneStructure()
else:
try: extra_exon_annotation = last_exon_region_db[affygene]
except KeyError: extra_exon_annotation = ''
try:
tc1 = original_exon_db[probeset1].SecondaryGeneID()
tc2 = original_exon_db[probeset2].SecondaryGeneID() ### Transcript Cluster
probeset_tc = makeUnique([tc1,tc2])
extra_transcript_annotation = string.join(probeset_tc,'|')
try: alternatively_reg_tc[affygene] += probeset_tc
except KeyError: alternatively_reg_tc[affygene] = probeset_tc
except Exception: extra_transcript_annotation=''
if array_type == 'RNASeq':
try: extra_transcript_annotation = entry.NovelEvent() ### Instead of secondary gene ID, list known vs. novel reciprocal junction annotation
except Exception: None
exp_list = [float(exp1),float(exp2),float(exp1)+float(fold1),float(exp2)+float(fold2)]; exp_list.sort(); exp_list.reverse()
probeset_tuple = (probeset1,probeset2)
else:
try: exp_list = [float(exp1),float(exp1)+float(fold1)]; exp_list.sort(); exp_list.reverse()
except Exception: exp_list = ['']
probeset_tuple = (probeset1)
highest_exp = exp_list[0]
        ###Use the permuted p-value or the lowest expression junction p-value depending on the situation
###This p-value is used to filter out aspire events for further analyses
if len(p_value_call)>0:
if probeset_tuple in permute_p_values:
lowest_raw_p, pos_permute, total_permute, false_pos = permute_p_values[probeset_tuple]
else: lowest_raw_p = "NA"; pos_permute = "NA"; total_permute = "NA"; false_pos = "NA"
else:
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method: raw_p_list = [entry.TTestNormalizedRatios(),entry.TTestNormalizedRatios2()] #raw_p_list = [float(rawp1),float(rawp2)]; raw_p_list.sort()
else:
try: raw_p_list = [float(entry.TTestNormalizedRatios())] ###Could also be rawp1, but this is more appropriate
except Exception: raw_p_list = [1] ### Occurs when p='NA'
raw_p_list.sort()
lowest_raw_p = raw_p_list[0]; pos_permute = "NA"; total_permute = "NA"; false_pos = "NA"
if perform_permutation_analysis == 'yes':
p_value_extra = str(pos_permute)+' out of '+str(total_permute)
else: p_value_extra = str(pos_permute)
up_exons = ''; down_exons = ''; up_exon_list = []; down_exon_list = []; gene_exon_list=[]
exon_data = critical_exon_list
variable = exon_data[0]
if variable == 1 and regulation_call == 'upregulated':
for exon in exon_data[1]:
up_exons = up_exons + exon + ',';up_exon_list.append(exon)
key = affygene,exon+'|'; gene_exon_list.append(key)
elif variable == 1 and regulation_call == 'downregulated':
for exon in exon_data[1]:
down_exons = down_exons + exon + ',';down_exon_list.append(exon)
key = affygene,exon+'|';gene_exon_list.append(key)
else:
try: exon1 = exon_data[1][0]; exon2 = exon_data[1][1]
except Exception: print exon_data;kill
if adjfold1 > 0:
up_exons = up_exons + exon1 + ',';down_exons = down_exons + exon2 + ','
up_exon_list.append(exon1); down_exon_list.append(exon2)
key = affygene,exon1+'|'; gene_exon_list.append(key);key = affygene,exon2+'|'; gene_exon_list.append(key)
else:
up_exons = up_exons + exon2 + ',';down_exons = down_exons + exon1 + ','
up_exon_list.append(exon2); down_exon_list.append(exon1)
key = affygene,exon1+'|'; gene_exon_list.append(key); key = affygene,exon2+'|'; gene_exon_list.append(key)
up_exons = up_exons[0:-1];down_exons = down_exons[0:-1]
try: ### Get comparisons group annotation data for multigroup comparison analyses
if original_conditions>2:
try: regulation_call = ped1.Annotation()
except Exception: null=[]
except Exception: null=[]
###Format functional results based on exon level fold change
null = []
#global a; a = exon_hits; global b; b=microRNA_attribute_db; kill
"""if 'G7100684@J934332_RC@j_at' in critical_probeset_list:
print probeset1, probeset2, gene, critical_probeset_list, 'blah'
if ('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at')) in functional_attribute_db:
print functional_attribute_db[('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at'))];blah
blah"""
new_functional_attribute_str, functional_attribute_list2, seq_attribute_str,protein_length_list = format_exon_functional_attributes(affygene,critical_probeset_list,functional_attribute_db,up_exon_list,down_exon_list,protein_length_list)
new_uniprot_exon_feature_str, uniprot_exon_feature_list, null, null = format_exon_functional_attributes(affygene,critical_probeset_list,protein_exon_feature_db,up_exon_list,down_exon_list,null)
null, microRNA_exon_feature_list, null, null = format_exon_functional_attributes(affygene,critical_probeset_list,microRNA_attribute_db,up_exon_list,down_exon_list,null)
if len(new_functional_attribute_str) == 0: new_functional_attribute_str = ' '
if len(new_uniprot_exon_feature_str) == 0: new_uniprot_exon_feature_str = ' '
if len(seq_attribute_str) > 12000: seq_attribute_str = 'The sequence is too long to report for spreadsheet analysis'
### Add entries to a database to quantify the number of reciprocal isoforms regulated
reciprocal_isoform_data = [len(critical_exon_list[1]),critical_exon_list[1],event_call,regulation_call]
try: float((lowest_raw_p))
except ValueError: lowest_raw_p=0
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: unique_exon_event_db[affygene].append(reciprocal_isoform_data)
except KeyError: unique_exon_event_db[affygene] = [reciprocal_isoform_data]
### Add functional attribute information to a new database
for item in uniprot_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: protein_exon_feature_db2[affygene,attribute].append(exon)
except KeyError: protein_exon_feature_db2[affygene,attribute]=[exon]
### Add functional attribute information to a new database
"""Database not used for exon/junction data export but for over-representation analysis (direction specific)"""
for item in microRNA_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: microRNA_exon_feature_db2[affygene,attribute].append(exon)
except KeyError: microRNA_exon_feature_db2[affygene,attribute]=[exon]
### Add functional attribute information to a new database
for item in functional_attribute_list2:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: functional_attribute_db2[affygene,attribute].append(exon)
except KeyError: functional_attribute_db2[affygene,attribute]=[exon]
try:
abs_fold = abs(float(mean_fold_change)); fold_direction = 'down'; fold1_direction = 'down'; fold2_direction = 'down'
large_splicing_diff1 = 0; large_splicing_diff2 = 0; large_splicing_diff = 'null'; opposite_splicing_pattern = 'no'
if float(mean_fold_change)>0: fold_direction = 'up'
if float(fold1)>0: fold1_direction = 'up'
if fold1_direction != fold_direction:
if float(fold1)>float(mean_fold_change): large_splicing_diff1 = float(fold1)-float(mean_fold_change)
except Exception:
fold_direction = ''; large_splicing_diff = ''; opposite_splicing_pattern = ''
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method: ed = exon_db[probeset1]
else:
try: ed = critical_probeset_annotation_db[selected_probeset,probeset2]
except KeyError:
try: ed = exon_db[selected_probeset] ###not useful data here, but the objects need to exist
except IOError: ed = original_exon_db[probeset1]
ucsc_splice_annotations = ["retainedIntron","cassetteExon","strangeSplice","altFivePrime","altThreePrime","altPromoter","bleedingExon"]
custom_annotations = ["alt-3'","alt-5'","alt-C-term","alt-N-term","cassette-exon","cassette-exon","exon-region-exclusion","intron-retention","mutually-exclusive-exon","trans-splicing"]
custom_exon_annotations_found='no'; ucsc_annotations_found = 'no'; exon_annot_score=0
if len(ed.SplicingEvent())>0:
for annotation in ucsc_splice_annotations:
if annotation in ed.SplicingEvent(): ucsc_annotations_found = 'yes'
for annotation in custom_annotations:
if annotation in ed.SplicingEvent(): custom_exon_annotations_found = 'yes'
if custom_exon_annotations_found == 'yes' and ucsc_annotations_found == 'no': exon_annot_score = 3
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'no': exon_annot_score = 4
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'yes': exon_annot_score = 5
else: exon_annot_score = 2
try: gene_splice_event_score[affygene].append(exon_annot_score) ###store for gene level results
except KeyError: gene_splice_event_score[affygene] = [exon_annot_score]
try: gene_exon_region[affygene].append(ed.ExonRegionID()) ###store for gene level results
except KeyError: gene_exon_region[affygene] = [ed.ExonRegionID()]
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if float(fold2)>0: fold2_direction = 'up'
if fold2_direction != fold_direction:
if float(fold2)>float(mean_fold_change):
large_splicing_diff2 = float(fold2)-float(mean_fold_change)
if abs(large_splicing_diff2) > large_splicing_diff1: large_splicing_diff = str(large_splicing_diff2)
else: large_splicing_diff = str(large_splicing_diff1)
if fold1_direction != fold2_direction and abs(float(fold1))>0.4 and abs(float(fold2))>0.4 and abs(float(mean_fold_change))< max([float(fold2),float(fold1)]):
opposite_splicing_pattern = 'yes'
            ### Annotate splicing events based on exon structure data
if array_type == 'AltMouse':
extra_exon_annotation = ExonAnnotate_module.annotate_splice_event(exons1,exons2,extra_transcript_annotation)
try: splice_event_db[extra_exon_annotation] += 1
except KeyError: splice_event_db[extra_exon_annotation] = 1
try:
direct_domain_alignments = probeset_aligning_db[selected_probeset,probeset2]
try: direct_domain_gene_alignments[affygene]+=', '+direct_domain_alignments
except KeyError: direct_domain_gene_alignments[affygene]=direct_domain_alignments
except KeyError: direct_domain_alignments = ' '
splicing_event = ed.SplicingEvent()
if array_type == 'RNASeq':
splicing_event = checkForTransSplicing(probeset1_display,splicing_event)
splicing_event = checkForTransSplicing(probeset2,splicing_event)
exp1 = covertLogExpressionToNonLog(exp1)
exp2 = covertLogExpressionToNonLog(exp2)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
fold2 = covertLogFoldToNonLog(fold2)
adjfold1 = covertLogFoldToNonLog(adjfold1)
adjfold2 = covertLogFoldToNonLog(adjfold2)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
### Annotate splicing events based on pre-computed and existing annotations
values= [affygene,dI,symbol,fs(description),exons1,exons2,regulation_call,event_call,probeset1_display,rawp1,probeset2,rawp2,fold1,fold2,adjfold1,adjfold2]
values+=[extra_transcript_annotation,up_exons,down_exons,fs(new_functional_attribute_str),fs(new_uniprot_exon_feature_str),fs(seq_attribute_str),exp1,exp2,fs(direct_domain_alignments)]
values+=[str(baseline_const_exp),str(lowest_raw_p),p_value_extra,str(false_pos),mean_fold_change,extra_exon_annotation]
values+=[ed.ExternalExonIDs(),ed.ExonRegionID(),splicing_event,str(exon_annot_score),large_splicing_diff,location_summary]
exon_sets = abs(float(dI)),regulation_call,event_call,exons1,exons2,''
### Export significant reciprocal junction pairs and scores
values_ps = [probeset1+'|'+probeset2,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_ps = string.join(values_ps,'\t')+'\n'
try: ProcessedSpliceData_data.write(values_ps)
except Exception: None
values_ge = [affygene,'En',dI,str(lowest_raw_p),symbol,probeset1_display+' | '+probeset2]; values_ge = string.join(values_ge,'\t')+'\n'
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
goelite_data.write(values_ge)
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
try: exon_probeset = exon_array_translation_db[affygene+':'+exon_data[1][0]][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1 = None #probeset1 = affygene+':'+exon_data[1][0]
try:
null = int(probeset1) ### Must be an int to work in DomainGraph
values_dg = [probeset1,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_dg = string.join(values_dg,'\t')+'\n'
if array_type == 'junction' or array_type == 'RNASeq':
DG_data.write(values_dg)
values_srf = string.join([probeset1,'Ae',dI,str(lowest_raw_p)],'\t')+'\n'
if float(dI)>0:
SRFinder_ex_data.write(values_srf)
elif float(dI)<0:
SRFinder_in_data.write(values_srf)
except Exception: null=[]
else:
si_pvalue = lowest_raw_p
if si_pvalue == 1: si_pvalue = 'NA'
if probeset1 in midas_db:
midas_p = str(midas_db[probeset1])
if float(midas_p)<lowest_raw_p: lowest_raw_p = float(midas_p) ###This is the lowest and SI-pvalue
else: midas_p = ''
###Determine what type of exon-annotations are present to assign a confidence score
if affygene in annotate_db: ###Determine the transcript clusters used to comprise a splice event (genes and exon specific)
try:
gene_tc = annotate_db[affygene].TranscriptClusterIDs()
try: probeset_tc = [ed.SecondaryGeneID()]
except Exception: probeset_tc = [affygene]
for transcript_cluster in gene_tc: probeset_tc.append(transcript_cluster)
probeset_tc = makeUnique(probeset_tc)
except Exception: probeset_tc = ''; gene_tc=''
else:
try:
try: probeset_tc = [ed.SecondaryGeneID()]
except Exception: probeset_tc = [affygene]
probeset_tc = makeUnique(probeset_tc)
except Exception: probeset_tc = ''; gene_tc=''
cluster_number = len(probeset_tc)
try: alternatively_reg_tc[affygene] += probeset_tc
except KeyError: alternatively_reg_tc[affygene] = probeset_tc
try: last_exon_region = last_exon_region_db[affygene]
except KeyError: last_exon_region = ''
if cluster_number>1: exon_annot_score = 1
direct_domain_alignments = ' '
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null':
try:
direct_domain_alignments = probeset_aligning_db[probeset1]
try: direct_domain_gene_alignments[affygene]+=', '+direct_domain_alignments
except KeyError: direct_domain_gene_alignments[affygene]=direct_domain_alignments
except KeyError: direct_domain_alignments = ' '
else:
try: direct_domain_alignments = probeset_aligning_db[affygene+':'+exons1]
except KeyError: direct_domain_alignments = ''
if array_type == 'RNASeq':
exp1 = covertLogExpressionToNonLog(exp1)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
adjfold1 = covertLogFoldToNonLog(adjfold1)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
try: adj_SIp=fdr_exon_stats[probeset1].AdjP()
except Exception: adj_SIp = 'NA'
try: secondary_geneid = ed.SecondaryGeneID()
except Exception: secondary_geneid = affygene
if array_type == 'RNASeq':
secondary_geneid = ed.NovelExon()
### Write Splicing Index results
values= [affygene,dI,symbol,fs(description),exons1,regulation_call,probeset1,rawp1,str(lowest_raw_p),midas_p,fold1,adjfold1]
values+=[up_exons,down_exons,fs(new_functional_attribute_str),fs(new_uniprot_exon_feature_str),fs(seq_attribute_str),fs(direct_domain_alignments),exp1]
values+=[str(baseline_const_exp),str(si_pvalue),DV,mean_fold_change,secondary_geneid, ed.ExternalExonIDs()]
values+=[ed.Constitutive(),ed.ExonRegionID(),ed.SplicingEvent(),last_exon_region,ed.LocationSummary()] #str(exon_annot_score)
if probeset1 in filtered_probeset_db: values += filtered_probeset_db[probeset1]
exon_sets = abs(float(dI)),regulation_call,event_call,exons1,exons1,midas_p
probeset = probeset1 ### store original ID (gets converted below)
### Write DomainGraph results
try: midas_p = str(midas_db[probeset1])
except KeyError: midas_p = 'NA'
### Export significant exon/junction IDs and scores
values_ps = [probeset1,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_ps = string.join(values_ps,'\t')+'\n'
try: ProcessedSpliceData_data.write(values_ps)
except Exception: None
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
try: exon_probeset = exon_array_translation_db[affygene+':'+exon_data[1][0]][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1 = None ### don't write out a line
else:
try: exon_probeset = exon_array_translation_db[probeset1][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1=None; #null=[]; #print gcn, probeset1;kill - force an error - new in version 2.0.8
try:
null = int(probeset1)
values_dg = [probeset1,affygene,'changed',dI,str(si_pvalue),midas_p]; values_dg = string.join(values_dg,'\t')+'\n'
DG_data.write(values_dg)
values_srf = string.join([probeset1,'Ae',dI,str(lowest_raw_p)],'\t')+'\n'
if float(dI)>0:
SRFinder_ex_data.write(values_srf)
elif float(dI)<0:
SRFinder_in_data.write(values_srf)
except Exception: null=[]
values_ge = [affygene,'En',dI,str(si_pvalue),midas_p,symbol,probeset]; values_ge = string.join(values_ge,'\t')+'\n'
goelite_data.write(values_ge)
if len(ed.SplicingEvent())>2:
try: external_exon_annot[affygene].append(ed.SplicingEvent())
except KeyError: external_exon_annot[affygene] = [ed.SplicingEvent()]
try: values = string.join(values,'\t')+'\n'
except Exception: print values;kill
data.write(values)
###Process data for gene level reports
if float((lowest_raw_p))<=p_threshold or false_pos < 2 or lowest_raw_p == 1:
try: comparison_count[affygene] += 1
except KeyError: comparison_count[affygene] = 1
try: aspire_gene_results[affygene].append(exon_sets)
except KeyError: aspire_gene_results[affygene] = [exon_sets]
for exon in up_exon_list:
exon_info = exon,'upregulated'
try: critical_gene_exons[affygene].append(exon_info)
except KeyError: critical_gene_exons[affygene] = [exon_info]
for exon in down_exon_list:
exon_info = exon,'downregulated'
try: critical_gene_exons[affygene].append(exon_info)
except KeyError: critical_gene_exons[affygene] = [exon_info]
data.close()
print event_count, analysis_method, "results written to:", aspire_output,'\n'
try: clearObjectsFromMemory(original_exon_db)
except Exception: null=[]
exon_array_translation_db=[]; original_exon_db=[]; probeset_to_gene=[]
### Finish writing the DomainGraph export file with non-significant probesets
if array_type != 'AltMouse':
for probeset in excluded_probeset_db:
eed = excluded_probeset_db[probeset]
try: midas_p = str(midas_db[probeset])
except KeyError: midas_p = 'NA'
### Export significant exon/junction IDs and scores
try: values_ps = [probeset,eed.GeneID(),'UC',eed.Score(),str(eed.TTestNormalizedRatios()),midas_p]
except Exception: excl_probeset, geneid, score, rawp, pvalue = eed; values_ps = [probeset,geneid,'UC', str(score), str(rawp), str(pvalue)]
values_ps = string.join(values_ps,'\t')+'\n'; ProcessedSpliceData_data.write(values_ps)
### Write DomainGraph results
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
try: exon_probeset = exon_array_translation_db[probeset][0]; probeset = exon_probeset; gcn+=1
except Exception: probeset=None; # null=[] - force an error - new in version 2.0.8
try: values_dg = [probeset,eed.GeneID(),'UC',eed.Score(),str(eed.TTestNormalizedRatios()),midas_p]
except Exception:
try:
excl_probeset, geneid, score, rawp, pvalue = eed
if ':' in probeset: probeset = excl_probeset ### Example: ENSMUSG00000029213:E2.1, make this just the numeric exclusion probeset - Not sure if DG handles non-numeric
values_dg = [probeset,geneid,'UC', str(score), str(rawp), str(pvalue)]
except Exception: None
try:
null=int(probeset)
values_dg = string.join(values_dg,'\t')+'\n'; DG_data.write(values_dg)
except Exception: null=[]
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
for id in exon_array_translation_db:
SRFinder_denom_data.write(exon_array_translation_db[id]+'\tAe\n')
else:
for probeset in original_exon_db:
SRFinder_denom_data.write(probeset+'\tAe\n')
DG_data.close()
SRFinder_in_data.close()
SRFinder_ex_data.close()
SRFinder_denom_data.close()
for affygene in direct_domain_gene_alignments:
domains = string.split(direct_domain_gene_alignments[affygene],', ')
domains = unique.unique(domains); domains = string.join(domains,', ')
direct_domain_gene_alignments[affygene] = domains
### functional_attribute_db2 will be reorganized below, so preserve the original database under another name first
functional_attribute_db = functional_attribute_db2
functional_attribute_db2 = reorganize_attribute_entries(functional_attribute_db2,'no')
external_exon_annot = eliminate_redundant_dict_values(external_exon_annot)
protein_exon_feature_db = protein_exon_feature_db2
protein_exon_feature_db2 = reorganize_attribute_entries(protein_exon_feature_db2,'no')
############ Export Gene Data ############
up_splice_val_genes = 0; down_dI_genes = 0; diff_exp_spliced_genes = 0; diff_spliced_rna_factor = 0
ddI = 0; udI = 0
summary_data_db['direct_domain_genes']=len(direct_domain_gene_alignments)
summary_data_db['alt_genes']=len(aspire_gene_results)
critical_gene_exons = eliminate_redundant_dict_values(critical_gene_exons)
aspire_output_gene = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-exon-inclusion-GENE-results.txt'
data = export.ExportFile(aspire_output_gene)
if array_type == 'AltMouse': goelite_data.write("GeneID\tSystemCode\n")
title = ['AffyGene','max_dI','midas-p (corresponding)','symbol','external gene ID','description','regulation_call','event_call']
title +=['number_of_comparisons','num_effected_exons','up_exons','down_exons','functional_attribute','uniprot-ens_exon_features','direct_domain_alignments']
title +=['pathways','mean_fold_change','exon-annotations','exon-region IDs','alternative gene ID','splice-annotation score']
title = string.join(title,'\t')+'\n'
data.write(title)
for affygene in aspire_gene_results:
if affygene in annotate_db:
description = annotate_db[affygene].Description()
symbol = annotate_db[affygene].Symbol()
ensembl = annotate_db[affygene].ExternalGeneID()
if array_type != 'AltMouse' and array_type != 'RNASeq': transcript_clusters = alternatively_reg_tc[affygene]; transcript_clusters = makeUnique(transcript_clusters); transcript_clusters = string.join(transcript_clusters,'|')
else: transcript_clusters = affygene
rna_processing_factor = annotate_db[affygene].RNAProcessing()
else: description='';symbol='';ensembl=affygene;rna_processing_factor=''; transcript_clusters=''
if ensembl in go_annotations: wpgo = go_annotations[ensembl]; goa = wpgo.Combined()
else: goa = ''
if array_type == 'AltMouse':
if len(ensembl) >0: goelite_data.write(ensembl+'\tL\n')
try: gene_splice_event_score[affygene].sort(); top_se_score = str(gene_splice_event_score[affygene][-1])
except KeyError: top_se_score = 'NA'
try: gene_regions = gene_exon_region[affygene]; gene_regions = makeUnique(gene_regions); gene_regions = string.join(gene_regions,'|')
except KeyError: gene_regions = 'NA'
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres': number_of_comparisons = str(comparison_count[affygene])
else: number_of_comparisons = 'NA'
results_list = aspire_gene_results[affygene]
results_list.sort(); results_list.reverse()
max_dI = str(results_list[0][0])
regulation_call = results_list[0][1]
event_call = results_list[0][2]
midas_p = results_list[0][-1]
num_critical_exons = str(len(critical_gene_exons[affygene]))
try: direct_domain_annots = direct_domain_gene_alignments[affygene]
except KeyError: direct_domain_annots = ' '
down_exons = ''; up_exons = ''; down_list=[]; up_list=[]
for exon_info in critical_gene_exons[affygene]:
exon = exon_info[0]; call = exon_info[1]
if call == 'downregulated':
down_exons = down_exons + exon + ','
down_list.append(exon)
ddI += 1
if call == 'upregulated':
up_exons = up_exons + exon + ','
up_list.append(exon)
udI += 1
down_exons = down_exons[0:-1]
up_exons = up_exons[0:-1]
up_exons = add_a_space(up_exons); down_exons = add_a_space(down_exons)
functional_annotation =''
if affygene in functional_attribute_db2:
number_of_functional_attributes = str(len(functional_attribute_db2[affygene]))
attribute_list = functional_attribute_db2[affygene]
attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
functional_annotation = functional_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes': functional_annotation = functional_annotation + exons
else: functional_annotation = functional_annotation + ','
functional_annotation = functional_annotation[0:-1]
uniprot_exon_annotation = ''
if affygene in protein_exon_feature_db2:
number_of_functional_attributes = str(len(protein_exon_feature_db2[affygene]))
attribute_list = protein_exon_feature_db2[affygene]; attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
uniprot_exon_annotation = uniprot_exon_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes': uniprot_exon_annotation = uniprot_exon_annotation + exons
else: uniprot_exon_annotation = uniprot_exon_annotation + ','
uniprot_exon_annotation = uniprot_exon_annotation[0:-1]
if len(uniprot_exon_annotation) == 0: uniprot_exon_annotation = ' '
if len(functional_annotation) == 0: functional_annotation = ' '
if affygene in gene_expression_diff_db:
mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try:
if abs(float(mean_fold_change)) > log_fold_cutoff: diff_exp_spliced_genes += 1
except Exception: diff_exp_spliced_genes = diff_exp_spliced_genes
else: mean_fold_change = 'NC'
if len(rna_processing_factor) > 2: diff_spliced_rna_factor +=1
###Add annotations for where in the gene structure these exons are (according to Ensembl)
if affygene in external_exon_annot: external_gene_annot = string.join(external_exon_annot[affygene],', ')
else: external_gene_annot = ''
if array_type == 'RNASeq':
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
values =[affygene,max_dI,midas_p,symbol,ensembl,fs(description),regulation_call,event_call,number_of_comparisons]
values+=[num_critical_exons,up_exons,down_exons,functional_annotation]
values+=[fs(uniprot_exon_annotation),fs(direct_domain_annots),fs(goa),mean_fold_change,external_gene_annot,gene_regions,transcript_clusters,top_se_score]
values = string.join(values,'\t')+'\n'
data.write(values)
### Use results for summary statistics
if len(up_list)>len(down_list): up_splice_val_genes +=1
else: down_dI_genes +=1
data.close()
print "Gene-level results written"
###'yes' here indicates that although the truncation events will initially be filtered out, they will later be added
###back in without the non-truncation annotations if there is no second database (in this case functional_attribute_db again).
###IF WE WANT TO FILTER OUT NON-NMD ENTRIES WHEN NMD IS PRESENT (FOR A GENE), functional_attribute_db MUST BE INCLUDED AS THE SECOND VARIABLE!!!!
###Currently, 'yes' does nothing
functional_annotation_db, null = grab_summary_dataset_annotations(functional_attribute_db,'','yes')
upregulated_genes = 0; downregulated_genes = 0
###Calculate the number of upregulated and downregulated genes
for affygene in gene_expression_diff_db:
fold_val = gene_expression_diff_db[affygene].ConstitutiveFold()
try:
if float(fold_val) > log_fold_cutoff: upregulated_genes += 1
elif abs(float(fold_val)) > log_fold_cutoff: downregulated_genes += 1
except Exception: null=[]
upregulated_rna_factor = 0; downregulated_rna_factor = 0
###Calculate the total number of putative RNA-processing/binding factors differentially regulated
for affygene in gene_expression_diff_db:
gene_fold = gene_expression_diff_db[affygene].ConstitutiveFold()
rna_processing_factor = gene_expression_diff_db[affygene].RNAProcessing()
if len(rna_processing_factor) > 1:
if gene_fold>log_fold_cutoff: upregulated_rna_factor += 1
elif abs(gene_fold)>log_fold_cutoff: downregulated_rna_factor += 1
###Generate three files for downstream functional summary
### functional_annotation_db2 is output to the same function as functional_annotation_db, ranked_uniprot_list_all to get all ranked uniprot annotations,
### and ranked_uniprot_list_coding_only to get only coding ranked uniprot annotations
functional_annotation_db2, ranked_uniprot_list_all = grab_summary_dataset_annotations(protein_exon_feature_db,'','') #functional_attribute_db
null, ranked_uniprot_list_coding_only = grab_summary_dataset_annotations(protein_exon_feature_db,functional_attribute_db,'') #functional_attribute_db
functional_attribute_db=[]; protein_exon_feature_db=[]
###Summarize changes in average protein length for each splice event
up_protein_list=[];down_protein_list=[]; protein_length_fold_diff=[]
for [down_protein,up_protein] in protein_length_list:
up_protein = float(up_protein); down_protein = float(down_protein)
down_protein_list.append(down_protein); up_protein_list.append(up_protein)
if up_protein > 10 and down_protein > 10:
fold_change = up_protein/down_protein; protein_length_fold_diff.append(fold_change)
median_fold_diff = statistics.median(protein_length_fold_diff)
try: down_avg=int(statistics.avg(down_protein_list)); up_avg=int(statistics.avg(up_protein_list))
except Exception: down_avg=0; up_avg=0
try:
try:
down_std=int(statistics.stdev(down_protein_list)); up_std=int(statistics.stdev(up_protein_list))
except ValueError: ###If 'null' is returned from stdev
down_std = 0;up_std = 0
except Exception:
down_std = 0;up_std = 0
if len(down_protein_list)>1 and len(up_protein_list)>1:
try:
#t,df,tails = statistics.ttest(down_protein_list,up_protein_list,2,3)
#t = abs(t);df = round(df)
#print 'ttest t:',t,'df:',df
#p = str(statistics.t_probability(t,df))
p = str(statistics.runComparisonStatistic(down_protein_list,up_protein_list,probability_statistic))
#print dataset_name,p
except Exception: p = 'NA'
if p == '1' or p == '1.0': p = 'NA'
else: p = 'NA'
###Calculate unique reciprocal isoforms for exon-inclusion, exclusion and mutual-exclusive events
unique_exon_inclusion_count=0;unique_exon_exclusion_count=0;unique_mutual_exclusive_count=0;
unique_exon_event_db = eliminate_redundant_dict_values(unique_exon_event_db)
for affygene in unique_exon_event_db:
isoform_entries = unique_exon_event_db[affygene]
possibly_redundant=[]; non_redundant=[]; check_for_redundant=[]
for entry in isoform_entries:
if entry[0] == 1: ### If there is only one regulated exon
possibly_redundant.append(entry)
else:
non_redundant.append(entry)
critical_exon_list = entry[1]
for exon in critical_exon_list:
check_for_redundant.append(exon)
for entry in possibly_redundant:
exon = entry[1][0]
if exon not in check_for_redundant:
non_redundant.append(entry)
for entry in non_redundant:
if entry[2] == 'ei-ex':
if entry[3] == 'upregulated': unique_exon_inclusion_count += 1
else: unique_exon_exclusion_count += 1
else: unique_mutual_exclusive_count += 1
udI = unique_exon_inclusion_count; ddI = unique_exon_exclusion_count; mx = unique_mutual_exclusive_count
###Add splice event information to the functional_annotation_db
for splice_event in splice_event_db:count = splice_event_db[splice_event]; functional_annotation_db.append((splice_event,count))
if analysis_method == 'splicing-index' or analysis_method == 'FIRMA': udI='NA'; ddI='NA'
summary_results_db[dataset_name[0:-1]] = udI,ddI,mx,up_splice_val_genes,down_dI_genes,(up_splice_val_genes + down_dI_genes),upregulated_genes, downregulated_genes, diff_exp_spliced_genes, upregulated_rna_factor,downregulated_rna_factor,diff_spliced_rna_factor,down_avg,down_std,up_avg,up_std,p,median_fold_diff,functional_annotation_db
result_list = exportComparisonSummary(dataset_name,summary_data_db,'log')
###Re-set this variable (useful for testing purposes)
clearObjectsFromMemory(gene_expression_diff_db)
clearObjectsFromMemory(splice_event_list); clearObjectsFromMemory(si_db); si_db=[]
clearObjectsFromMemory(fdr_exon_stats)
try: clearObjectsFromMemory(excluded_probeset_db); clearObjectsFromMemory(ex_db); ex_db=[]
except Exception: ex_db=[]
clearObjectsFromMemory(exon_db)
#clearObjectsFromMemory(annotate_db)
critical_probeset_annotation_db=[]; gene_expression_diff_db=[]; domain_associated_genes=[]; permute_p_values=[]
permute_miR_inputs=[]; seq_attribute_str=[]; microRNA_count_db=[]; excluded_probeset_db=[]; fdr_exon_stats=[]
splice_event_list=[]; critical_exon_db_len=len(critical_exon_db)#; critical_exon_db=[] deleting here will cause a global instance problem
all_domain_gene_hits=[]; gene_splice_event_score=[]; unique_exon_event_db=[]; probeset_aligning_db=[]; ranked_uniprot_list_all=[];
filtered_microRNA_exon_db=[]; permute_domain_inputs=[]; functional_annotation_db2=[]; functional_attribute_db2=[]; protein_length_list=[];
ranked_uniprot_list_coding_only=[]; miR_str=[]; permute_input_list=[]; microRNA_exon_feature_db2=[]; alternatively_reg_tc=[];
direct_domain_gene_alignments=[]; aspire_gene_results=[]; domain_gene_counts=[]; functional_annotation=[]; protein_exon_feature_db2=[];
microRNA_attribute_db=[]; probeset_mirBS_db=[]; exon_hits=[]; critical_gene_exons=[]; gene_exon_region=[]; exon_db=[]; external_exon_annot=[];
values=[]; down_protein_list=[]; functional_annotation_db=[]; protein_length_fold_diff=[]; comparison_count=[]; filtered_arrayids=[];
domain_hit_gene_count_db=[]; up_protein_list=[]; probeset_domain_db=[]
try: goelite_data.close()
except Exception: null=[]
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, critical_exon_db_len
def deviation(dI,avg_dI,stdev_dI):
dI = covertLogFoldToNonLogFloat(dI)
avg_dI = covertLogFoldToNonLogFloat(avg_dI)
stdev_dI = covertLogFoldToNonLogFloat(stdev_dI)
return str(abs((dI-avg_dI)/stdev_dI))
def covertLogExpressionToNonLog(log_val):
if normalization_method == 'RPKM':
nonlog_val = (math.pow(2,float(log_val)))
else:
nonlog_val = (math.pow(2,float(log_val)))-1
return str(nonlog_val)
def covertLogFoldToNonLog(log_val):
try:
if float(log_val)<0: nonlog_val = (-1/math.pow(2,(float(log_val))))
else: nonlog_val = (math.pow(2,float(log_val)))
except Exception: nonlog_val = log_val
return str(nonlog_val)
def covertLogFoldToNonLogFloat(log_val):
if float(log_val)<0: nonlog_val = (-1/math.pow(2,(float(log_val))))
else: nonlog_val = (math.pow(2,float(log_val)))
return nonlog_val
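### Illustrative usage sketch (not part of the original pipeline; the helper name below is ours).
### It demonstrates the signed fold convention implemented by covertLogFoldToNonLog(): a log2 fold
### of +1 maps to 2.0 while -1 maps to -2.0 (i.e., -1/2**1), so down-regulation is reported as a
### negative linear fold rather than a fraction between 0 and 1.
def exampleFoldConversion():
    for log_fold in ['2.0','1.0','0.0','-1.0','-2.0']:
        print 'log2 fold', log_fold, '-> signed linear fold', covertLogFoldToNonLog(log_fold)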
def checkForTransSplicing(uid,splicing_event):
pl = string.split(uid,':')
if len(pl)>2:
if pl[0] not in pl[1]: ### Two different genes
if len(splicing_event)>0: splicing_event+= '|trans-splicing'
else: splicing_event = 'trans-splicing'
return splicing_event
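### Illustrative usage sketch for checkForTransSplicing() above (hypothetical junction IDs, not pipeline code).
### A junction ID spanning two different genes gains a trans-splicing annotation, while a junction confined
### to a single gene is returned unchanged.
def exampleTransSplicingCheck():
    same_gene = checkForTransSplicing('ENSG00000000001:E1.1-ENSG00000000001:E2.1','cassette-exon')
    two_genes = checkForTransSplicing('ENSG00000000001:E1.1-ENSG00000000002:E10.1','')
    print same_gene ### 'cassette-exon' (unchanged)
    print two_genes ### flagged with a trans-splicing annotation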
def fs(text):
### Wraps a text entry in quotes so that internal commas are not treated as field delimiters
return '"'+text+'"'
def analyzeSplicingIndex(fold_dbase):
"""The Splicing Index (SI) represents the log ratio of the exon intensities between the two tissues after normalization
to the gene intensities in each sample: SIi = log2((e1i/g1j)/(e2i/g2j)), for the i-th exon of the j-th gene in tissue
type 1 or 2. The splicing indices are then subjected to a t-test to probe for differential inclusion of the exon into the gene.
In order to determine if the change in isoform expression was statistically significant, a simple two-tailed t-test was carried
out on the isoform ratios by grouping the 10 samples from either "tumor" or "normal" tissue.
The method ultimately producing the highest proportion of true positives was to retain only: a) exons with a DABG p-value < 0.05,
b) genes with a signal > 70, c) exons with a log ratio between tissues (i.e., the gene-level normalized fold change) > 0.5,
d) Splicing Index p-values < 0.005 and e) Core exons.
Gardina PJ, Clark TA, Shimada B, Staples MK, Yang Q, Veitch J, Schweitzer A, Awad T, Sugnet C, Dee S, Davies C, Williams A, Turpaz Y.
Alternative splicing and differential gene expression in colon cancer detected by a whole genome exon array.
BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196
"""
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db)>0:
temp_db={}
for probeset in fold_dbase: temp_db[probeset]=[]
for probeset in temp_db:
try: filtered_probeset_db[probeset]
except KeyError: del fold_dbase[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
proceed = 0
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del fold_dbase[probeset]
except KeyError: null=[]
### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
data = export.ExportFile(summary_output)
title = string.join(['gene\tExonID\tprobesets']+original_array_names,'\t')+'\n'; data.write(title)
print 'Calculating splicing-index values (please be patient)...',
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(fold_dbase),id_name,'being examined'
###original_avg_const_exp_db contains constitutive mean expression values per group: G6953871 [7.71, 7.66]
###array_raw_group_values: Raw expression values in list of groups: G7072464@J935416_RC@j_at ([1.79, 2.16, 2.22], [1.68, 2.24, 1.97, 1.92, 2.12])
###avg_const_exp_db contains the raw constitutive expression values in a single list
splicing_index_hash=[]; excluded_probeset_db={}; denominator_probesets=0; interaction = 0
original_increment = int(len(exon_db)/20); increment = original_increment
for probeset in exon_db:
ed = exon_db[probeset]
#include_probeset = ed.IncludeProbeset()
if interaction == increment: increment+=original_increment; print '*',
interaction +=1
include_probeset = 'yes' ###Moved this filter to import of the probeset relationship file
###Examines user input parameters for inclusion of probeset types in the analysis
if include_probeset == 'yes':
geneid = ed.GeneID()
if probeset in fold_dbase and geneid in original_avg_const_exp_db: ###used to search for array_raw_group_values, but when filtered by expression changes, need to filter by adj_fold_dbase
denominator_probesets+=1
###Includes probesets with a calculated constitutive expression value for each gene and expression data for that probeset
group_index = 0; si_interim_group_db={}; si_interim_group_str_db={}; ge_threshold_count=0; value_count = 0
for group_values in array_raw_group_values[probeset]:
"""gene_expression_value = math.pow(2,original_avg_const_exp_db[geneid][group_index])
###Check to see if gene expression is > threshod for both conditions
if gene_expression_value>gene_expression_threshold:ge_threshold_count+=1"""
value_index = 0; ratio_hash=[]; ratio_str_hash=[]
for value in group_values: ###Calculate normalized ratio's for each condition and save raw values for later permutation
#exp_val = math.pow(2,value);ge_val = math.pow(2,avg_const_exp_db[geneid][value_count]) ###To calculate a ttest we need the raw constitutive expression values, these are not in group list form but are all in a single list so keep count.
exp_val = value;ge_val = avg_const_exp_db[geneid][value_count]
exp_ratio = exp_val-ge_val; ratio_hash.append(exp_ratio); ratio_str_hash.append(str(exp_ratio))
value_index +=1; value_count +=1
si_interim_group_db[group_index] = ratio_hash
si_interim_group_str_db[group_index] = ratio_str_hash
group_index+=1
group1_ratios = si_interim_group_db[0]; group2_ratios = si_interim_group_db[1]
group1_mean_ratio = statistics.avg(group1_ratios); group2_mean_ratio = statistics.avg(group2_ratios)
if export_NI_values == 'yes':
try: er = ed.ExonID()
except Exception: er = 'NA'
ev = string.join([geneid+'\t'+er+'\t'+probeset]+si_interim_group_str_db[0]+si_interim_group_str_db[1],'\t')+'\n'; data.write(ev)
#if ((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2)))<0: opposite_SI_log_mean = 'yes'
if (group1_mean_ratio*group2_mean_ratio)<0: opposite_SI_log_mean = 'yes'
else: opposite_SI_log_mean = 'no'
try:
if calculate_normIntensity_p == 'yes':
try:
normIntensityP = statistics.runComparisonStatistic(group1_ratios,group2_ratios,probability_statistic)
except Exception: normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
else: normIntensityP = 'NA' ### Set to an always significant value
if normIntensityP == 1: normIntensityP = 'NA'
splicing_index = group1_mean_ratio-group2_mean_ratio; abs_splicing_index = abs(splicing_index)
#if probeset == '3061323': print abs_splicing_index,normIntensityP,ed.ExonID(),group1_mean_ratio,group2_mean_ratio,math.log(group1_mean_ratio,2),math.log(group2_mean_ratio,2),((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2))),opposite_SI_log_mean; kill
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 0
#if abs_splicing_index>1 and normIntensityP < 0.05: print probeset,normIntensityP, abs_splicing_index;kill
else: midas_p = 0
#print ed.GeneID(),ed.ExonID(),probeset,splicing_index,normIntensityP,midas_p,group1_ratios,group2_ratios
if abs_splicing_index>alt_exon_logfold_cutoff and (normIntensityP < p_threshold or normIntensityP == 'NA' or normIntensityP == 1) and midas_p < p_threshold:
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
constit_exp1 = original_avg_const_exp_db[geneid][0]
constit_exp2 = original_avg_const_exp_db[geneid][1]
ge_fold=constit_exp2-constit_exp1
### Re-define all of the pairwise values now that the two Splicing-Index groups to report have been determined
data_list1 = array_raw_group_values[probeset][0]; data_list2 = array_raw_group_values[probeset][1]
baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 1
normInt1 = (baseline_exp-constit_exp1); normInt2 = (experimental_exp-constit_exp2); adj_fold = normInt2 - normInt1
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, '')
sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1); sid.setConstitutiveFold(ge_fold); sid.setProbesetExpressionData(ped)
splicing_index_hash.append((splicing_index,sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index,geneid,normIntensityP)
excluded_probeset_db[probeset] = eed
except Exception:
null = [] ###If this occurs, then most likely, the exon and constitutive probeset are the same
print 'Splicing Index analysis complete'
if export_NI_values == 'yes': data.close()
splicing_index_hash.sort(); splicing_index_hash.reverse()
print len(splicing_index_hash),id_name,"with evidence of Alternative expression"
p_value_call=''; permute_p_values = {}; summary_data_db['denominator_exp_events']=denominator_probesets
return splicing_index_hash,p_value_call,permute_p_values, excluded_probeset_db
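### Minimal numeric sketch of the Splicing Index computation described in the analyzeSplicingIndex()
### docstring above (hypothetical log2 values, not pipeline code). The gene-normalized exon intensity
### in log2 space is exon - gene for each condition, and the SI is the difference of the two normalized
### values, equivalent to log2((e1/g1)/(e2/g2)) on the linear scale.
def exampleSplicingIndex():
    e1,g1 = 8.0,10.0 ### log2 exon- and gene-level intensities, condition 1
    e2,g2 = 6.5,10.5 ### log2 exon- and gene-level intensities, condition 2
    normalized1 = e1-g1; normalized2 = e2-g2 ### -2.0 and -4.0
    splicing_index = normalized1-normalized2
    print 'SI =', splicing_index ### 2.0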
def importResiduals(filename,probe_probeset_db):
fn=filepath(filename); key_db = {}; x=0; prior_uid = ''; uid_gene_db={}
for line in open(fn,'rU').xreadlines():
if x == 0 and line[0] == '#': null=[]
elif x == 0: x+=1
else:
data = cleanUpLine(line)
t = string.split(data,'\t')
uid = t[0]; uid,probe = string.split(uid,'-')
try:
probeset = probe_probeset_db[probe]; residuals = t[1:]
if uid == prior_uid:
try: uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
except KeyError: uid_gene_db[probeset] = [residuals]
else: ### Hence, we have finished storing all residual data for that gene
if len(uid_gene_db)>0: calculateFIRMAScores(uid_gene_db); uid_gene_db={}
try: uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
except KeyError: uid_gene_db[probeset] = [residuals]
prior_uid = uid
except Exception: null=[]
### For the last gene imported
if len(uid_gene_db)>0: calculateFIRMAScores(uid_gene_db)
def calculateFIRMAScores(uid_gene_db):
probeset_residuals={}; all_gene_residuals=[]; total_probes=0
for probeset in uid_gene_db:
residuals_list = uid_gene_db[probeset]; sample_db={}; total_probes+=len(residuals_list)
### For all probes in a probeset, calculate the median residual for each sample
for residuals in residuals_list:
index=0
for residual in residuals:
try: sample_db[index].append(float(residual))
except KeyError: sample_db[index] = [float(residual)]
all_gene_residuals.append(float(residual))
index+=1
for index in sample_db:
median_residual = statistics.median(sample_db[index])
sample_db[index] = median_residual
probeset_residuals[probeset] = sample_db
### Calculate the Median absolute deviation
"""http://en.wikipedia.org/wiki/Absolute_deviation
The median absolute deviation (also MAD) is the median absolute deviation from the median. It is a robust estimator of dispersion.
For the example {2, 2, 3, 4, 14}: 3 is the median, so the absolute deviations from the median are {1, 1, 0, 1, 11} (or reordered as
{0, 1, 1, 1, 11}) with a median absolute deviation of 1, in this case unaffected by the value of the outlier 14.
Here, the global gene median will be expressed as res_gene_median.
"""
res_gene_median = statistics.median(all_gene_residuals); subtracted_residuals=[]
for residual in all_gene_residuals: subtracted_residuals.append(abs(res_gene_median-residual))
gene_MAD = statistics.median(subtracted_residuals)
#if '3263614' in probeset_residuals: print len(all_gene_residuals),all_gene_residuals
for probeset in probeset_residuals:
sample_db = probeset_residuals[probeset]
for index in sample_db:
median_residual = sample_db[index]
try:
firma_score = median_residual/gene_MAD
sample_db[index] = firma_score
except Exception: null=[]
#if probeset == '3263614': print index, median_residual, firma_score, gene_MAD
firma_scores[probeset] = sample_db
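### Self-contained sketch of the median absolute deviation (MAD) calculation documented in the
### calculateFIRMAScores() docstring above, reproducing its {2, 2, 3, 4, 14} example. The local
### median() helper is illustrative only; the pipeline itself uses statistics.median().
def exampleMedianAbsoluteDeviation():
    def median(values):
        values = sorted(values); n = len(values)
        if n % 2: return float(values[n/2])
        return (values[n/2-1]+values[n/2])/2.0
    residuals = [2,2,3,4,14]
    m = median(residuals) ### 3.0
    deviations = [abs(m-r) for r in residuals] ### [1.0, 1.0, 0.0, 1.0, 11.0]
    print 'MAD =', median(deviations) ### 1.0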
def importProbeToProbesets(fold_dbase):
#print "Importing probe-to-probeset annotations (please be patient)..."
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_probeset-probes.txt'
probeset_to_include={}
gene2examine={}
### Although we want to restrict the analysis to probesets in fold_dbase, we don't want to affect the FIRMA model - filter later
for probeset in fold_dbase:
try: ed = exon_db[probeset]; gene2examine[ed.GeneID()]=[]
except Exception: null=[]
for gene in original_avg_const_exp_db: gene2examine[gene]=[]
for probeset in exon_db:
ed = exon_db[probeset]; geneid = ed.GeneID()
if geneid in gene2examine:
gene2examine[geneid].append(probeset) ### Store these so we can break things up
probeset_to_include[probeset]=[]
probeset_probe_db = importGenericFilteredDBList(filename,probeset_to_include)
### Get Residuals filename and verify it's presence
#print "Importing comparison residuals..."
filename_objects = string.split(dataset_name[:-1],'.p'); filename = filename_objects[0]+'.txt'
if len(array_group_list)==2:
filename = import_dir = root_dir+'AltExpression/FIRMA/residuals/'+array_type+'/'+species+'/'+filename
else: filename = import_dir = root_dir+'AltExpression/FIRMA/FullDatasets/'+array_type+'/'+species+'/'+filename
status = verifyFile(filename)
if status != 'found':
print_out = 'The residual file: '; print_out+= filename
print_out+= ' was not found in the default location.\nPlease re-run the analysis from the beginning.'
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out
print traceback.format_exc(); badExit()
print "Calculating FIRMA scores..."
input_count = len(gene2examine) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(input_count/20); increment = original_increment
start_time = time.time(); x=0
probe_probeset_db={}; gene_count=0; total_gene_count = 0; max_gene_count=3000; round = 1
for gene in gene2examine:
gene_count+=1; total_gene_count+=1; x+=1
#if x == increment: increment+=original_increment; print '*',
for probeset in gene2examine[gene]:
for probe in probeset_probe_db[probeset]: probe_probeset_db[probe] = probeset
if gene_count == max_gene_count:
### Import residuals and calculate primary sample/probeset FIRMA scores
importResiduals(filename,probe_probeset_db)
#print max_gene_count*round,"genes"
print '*',
gene_count=0; probe_probeset_db={}; round+=1 ### Reset these variables and re-run
probeset_probe_db={}
### Analyze residuals for the remaining probesets (< max_gene_count)
importResiduals(filename,probe_probeset_db)
end_time = time.time(); time_diff = int(end_time-start_time)
print "FIRMA scores calculted for",total_gene_count, "genes in %d seconds" % time_diff
def FIRMAanalysis(fold_dbase):
"""The FIRMA method calculates a score for each probeset and for each samples within a group of arrays, independent
of group membership. However, in AltAnalyze, these analyses are performed dependent on group. The FIRMA score is calculated
by obtaining residual values (residuals is a variable for each probe that can't be explained by the GC content or intensity
of that probe) from APT, for all probes corresponding to a metaprobeset (Ensembl gene in AltAnalyze). These probe residuals
are imported and the FIRMA score is calculated as the median residual per probeset per sample divided by the
median absolute deviation (MAD) of all probe residuals for that gene (see calculateFIRMAScores above)."""
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing pattern)
if len(filtered_probeset_db)>0:
temp_db={}
for probeset in fold_dbase: temp_db[probeset]=[]
for probeset in temp_db:
try: filtered_probeset_db[probeset]
except KeyError: del fold_dbase[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing annotation)
if filter_for_AS == 'yes':
proceed = 0
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del fold_dbase[probeset]
except KeyError: null=[]
#print 'Beginning FIRMA analysis (please be patient)...'
### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
sample_names_ordered = [] ### note: Can't use original_array_names since the order is potentially different (FIRMA stores sample data as indices within dictionary keys)
for group_name in array_group_list: ### THIS LIST IS USED TO MAINTAIN CONSISTENT GROUP ORDERING DURING ANALYSIS
for sample_name in array_group_name_db[group_name]: sample_names_ordered.append(sample_name)
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
data = export.ExportFile(summary_output)
title = string.join(['gene-probesets']+sample_names_ordered,'\t')+'\n'; data.write(title)
### Import probes for probesets to be analyzed
global firma_scores; firma_scores = {}
importProbeToProbesets(fold_dbase)
print 'FIRMA scores obtained for',len(firma_scores),'probesets.'
### Group sample scores for each probeset and calculate statistics
firma_hash=[]; excluded_probeset_db={}; denominator_probesets=0; interaction = 0
original_increment = int(len(firma_scores)/20); increment = original_increment
for probeset in firma_scores:
if probeset in fold_dbase: ### Filter based on expression
ed = exon_db[probeset]; geneid = ed.GeneID()
if interaction == increment: increment+=original_increment; print '*',
interaction +=1; denominator_probesets+=1
sample_db = firma_scores[probeset]
###Use the index values from performExpressionAnalysis to assign each expression value to a new database
firma_group_array = {}
for group_name in array_group_db:
for array_index in array_group_db[group_name]:
firma_score = sample_db[array_index]
try: firma_group_array[group_name].append(firma_score)
except KeyError: firma_group_array[group_name] = [firma_score]
###array_group_list should already be unique and correctly sorted (see above)
firma_lists=[]; index=0
for group_name in array_group_list:
firma_list = firma_group_array[group_name]
if len(array_group_list)>2: firma_list = statistics.avg(firma_list), firma_list, index
firma_lists.append(firma_list); index+=1
if export_NI_values == 'yes': ### DO THIS HERE SINCE firma_lists IS SORTED BELOW!!!!
try: er = ed.ExonID()
except Exception: er = 'NA'
export_list = [geneid+'\t'+er+'\t'+probeset]; export_list2=[]
for firma_ls in firma_lists:
if len(array_group_list)>2: firma_ls = firma_ls[1] ### See above modification of the firma_list object for multiple-group analysis
export_list+=firma_ls
for i in export_list: export_list2.append(str(i))
ev = string.join(export_list2,'\t')+'\n'; data.write(ev)
if len(array_group_list)==2:
firma_list1 = firma_lists[0]; firma_list2 = firma_lists[-1]; firma_avg1 = statistics.avg(firma_list1); firma_avg2 = statistics.avg(firma_list2)
index1=0; index2=1 ### Only two groups, thus only two indices
else: ### The code below identifies the comparison which yields the greatest FIRMA difference
firma_lists.sort(); index1=firma_lists[0][-1]; index2 = firma_lists[-1][-1]
firma_list1 = firma_lists[0][1]; firma_list2 = firma_lists[-1][1]; firma_avg1 = firma_lists[0][0]; firma_avg2 = firma_lists[-1][0]
if calculate_normIntensity_p == 'yes':
try:
normIntensityP = statistics.runComparisonStatistic(firma_list1,firma_list2,probability_statistic)
except Exception: normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
else: normIntensityP = 'NA'
if normIntensityP == 1: normIntensityP = 'NA'
firma_fold_change = firma_avg2 - firma_avg1
firma_fold_change = -1*firma_fold_change ### Make this equivalent to Splicing Index fold which is also relative to experimental not control
if (firma_avg2*firma_avg1)<0: opposite_FIRMA_scores = 'yes'
else: opposite_FIRMA_scores = 'no'
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError: midas_p = 0
else: midas_p = 0
#if probeset == '3263614': print firma_fold_change, normIntensityP, midas_p,'\n',firma_list1, firma_list2, [p_threshold];kill
if abs(firma_fold_change)>alt_exon_logfold_cutoff and (normIntensityP < p_threshold or normIntensityP == 'NA') and midas_p < p_threshold:
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
#gene_expression_values = original_avg_const_exp_db[geneid]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
### Re-define all of the pairwise values now that the two FIRMA groups to report have been determined
data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 1
normInt1 = (baseline_exp-constit_exp1); normInt2 = (experimental_exp-constit_exp2); adj_fold = normInt2 - normInt1
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
fid = ExonData(firma_fold_change,probeset,critical_exon_list,geneid,data_list1,data_list2,normIntensityP,opposite_FIRMA_scores)
fid.setConstitutiveExpression(constit_exp1); fid.setConstitutiveFold(ge_fold); fid.setProbesetExpressionData(ped)
firma_hash.append((firma_fold_change,fid))
#print [[[probeset,firma_fold_change,normIntensityP,p_threshold]]]
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(firma_fold_change,geneid,normIntensityP)
excluded_probeset_db[probeset] = eed
print 'FIRMA analysis complete'
if export_NI_values == 'yes': data.close()
firma_hash.sort(); firma_hash.reverse()
print len(firma_hash),"Probesets with evidence of Alternative expression out of",len(excluded_probeset_db)+len(firma_hash)
p_value_call=''; permute_p_values = {}; summary_data_db['denominator_exp_events']=denominator_probesets
return firma_hash,p_value_call,permute_p_values, excluded_probeset_db
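### Minimal sketch of the per-sample FIRMA score described in the FIRMAanalysis() docstring above
### (hypothetical numbers, not pipeline code): the score is the median probe residual for a probeset
### in one sample divided by the MAD of all probe residuals for that gene (gene_MAD in
### calculateFIRMAScores()).
def exampleFirmaScore():
    median_probeset_residual = 0.75 ### median residual for one probeset in one sample
    gene_MAD = 0.25 ### median absolute deviation of all of the gene's probe residuals
    firma_score = median_probeset_residual/gene_MAD
    print 'FIRMA score =', firma_score ### 3.0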
def getFilteredFilename(filename):
if array_type == 'junction':
filename = string.replace(filename,'.txt','-filtered.txt')
return filename
def getExonVersionFilename(filename):
original_filename = filename
if array_type == 'junction' or array_type == 'RNASeq':
if explicit_data_type != 'null':
filename = string.replace(filename,array_type,array_type+'/'+explicit_data_type)
### Make sure the file exists, otherwise, use the original
file_status = verifyFile(filename)
#print [[filename,file_status]]
if file_status != 'found': filename = original_filename
return filename
def importProbesetAligningDomains(exon_db,report_type):
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_domain_aligning_probesets.txt'
filename=getFilteredFilename(filename)
probeset_aligning_db = importGenericDBList(filename)
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_indirect_domain_aligning_probesets.txt'
filename=getFilteredFilename(filename)
probeset_indirect_aligning_db = importGenericDBList(filename)
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
new_exon_db={}; splicing_call_db={}
for probeset_pair in exon_db:
### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
ed = exon_db[probeset_pair]; geneid = ed.GeneID(); critical_exons = ed.CriticalExons()
for exon in critical_exons:
new_key = geneid+':'+exon
try: new_exon_db[new_key].append(probeset_pair)
except KeyError: new_exon_db[new_key] = [probeset_pair]
try: splicing_call_db[new_key].append(ed.SplicingCall())
except KeyError: splicing_call_db[new_key] = [ed.SplicingCall()]
for key in new_exon_db:
probeset_pairs = new_exon_db[key]; probeset_pair = probeset_pairs[0] ### grab one of the probeset pairs
ed = exon_db[probeset_pair]; geneid = ed.GeneID()
jd = SimpleJunctionData(geneid,'','','',probeset_pairs) ### use only those necessary fields for this function (probeset pairs will be called as CriticalExons)
splicing_call_db[key].sort(); splicing_call = splicing_call_db[key][-1]; jd.setSplicingCall(splicing_call) ### Bug from 1.15 to have key be new_key?
new_exon_db[key] = jd
exon_db = new_exon_db
gene_protein_ft_db={};domain_gene_count_db={};protein_functional_attribute_db={}; probeset_aligning_db2={}
splicing_call_db=[]; new_exon_db=[] ### Clear memory
for probeset in exon_db:
#if probeset == '107650':
#if probeset in probeset_aligning_db: print probeset_aligning_db[probeset];kill
if probeset in probeset_aligning_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
gene = exon_db[probeset].GeneID()
new_domain_list=[]; new_domain_list2=[]
if report_type == 'gene' and proceed == 'yes':
for domain in probeset_aligning_db[probeset]:
try: domain_gene_count_db[domain].append(gene)
except KeyError: domain_gene_count_db[domain] = [gene]
try: gene_protein_ft_db[gene].append(domain)
except KeyError: gene_protein_ft_db[gene]=[domain]
elif proceed == 'yes':
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
probeset_list = exon_db[probeset].CriticalExons()
else: probeset_list = [probeset]
for id in probeset_list:
for domain in probeset_aligning_db[probeset]:
new_domain_list.append('(direct)'+domain)
new_domain_list2.append((domain,'+'))
new_domain_list = unique.unique(new_domain_list)
new_domain_list_str = string.join(new_domain_list,', ')
gene_protein_ft_db[gene,id] = new_domain_list2
probeset_aligning_db2[id] = new_domain_list_str
#print exon_db['107650']
for probeset in exon_db:
if probeset in probeset_indirect_aligning_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
gene = exon_db[probeset].GeneID()
new_domain_list=[]; new_domain_list2=[]
if report_type == 'gene' and proceed == 'yes':
for domain in probeset_indirect_aligning_db[probeset]:
try: domain_gene_count_db[domain].append(gene)
except KeyError: domain_gene_count_db[domain] = [gene]
try: gene_protein_ft_db[gene].append(domain)
except KeyError: gene_protein_ft_db[gene]=[domain]
elif proceed == 'yes':
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
probeset_list = exon_db[probeset].CriticalExons()
else: probeset_list = [probeset]
for id in probeset_list:
for domain in probeset_indirect_aligning_db[probeset]:
new_domain_list.append('(indirect)'+domain)
new_domain_list2.append((domain,'-'))
new_domain_list = unique.unique(new_domain_list)
new_domain_list_str = string.join(new_domain_list,', ')
gene_protein_ft_db[gene,id] = new_domain_list2
probeset_aligning_db2[id] = new_domain_list_str
domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
gene_protein_ft_db = eliminate_redundant_dict_values(gene_protein_ft_db)
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
clearObjectsFromMemory(exon_db);exon_db=[]
try: clearObjectsFromMemory(new_exon_db)
except Exception: null=[]
probeset_indirect_aligning_db=[]; probeset_aligning_db=[]
if report_type == 'perfect_match':
gene_protein_ft_db=[];domain_gene_count_db=[];protein_functional_attribute_db=[]
return probeset_aligning_db2
elif report_type == 'probeset':
probeset_aligning_db2=[]
return gene_protein_ft_db,domain_gene_count_db,protein_functional_attribute_db
else:
probeset_aligning_db2=[]; protein_functional_attribute_db=[]; probeset_aligning_db2=[]
len_gene_protein_ft_db = len(gene_protein_ft_db); gene_protein_ft_db=[]
return len_gene_protein_ft_db,domain_gene_count_db
def importProbesetProteinCompDomains(exon_db,report_type,comp_type):
filename = 'AltDatabase/'+species+'/'+array_type+'/probeset-domain-annotations-'+comp_type+'.txt'
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename=getFilteredFilename(filename)
filename=getExonVersionFilename(filename)
probeset_aligning_db = importGeneric(filename)
filename = 'AltDatabase/'+species+'/'+array_type+'/probeset-protein-annotations-'+comp_type+'.txt'
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename=getFilteredFilename(filename)
filename=getExonVersionFilename(filename)
gene_protein_ft_db={};domain_gene_count_db={}
for probeset in exon_db:
initial_proceed = 'no'; original_probeset = probeset
if probeset in probeset_aligning_db: initial_proceed = 'yes'
elif array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
if '|' in probeset[0]: probeset1 = string.split(probeset[0],'|')[0]; probeset = probeset1,probeset[1]
try: alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id,probeset[1]
except Exception: null=[]
probeset_joined = string.join(probeset,'|')
#print [probeset_joined],[probeset]
if probeset_joined in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset_joined
elif probeset[0] in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset[0]
elif probeset[1] in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset[1]
#else: for i in probeset_aligning_db: print [i];kill
if initial_proceed == 'yes':
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[original_probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
new_domain_list = []
gene = exon_db[original_probeset].GeneID()
if report_type == 'gene' and proceed == 'yes':
for domain_data in probeset_aligning_db[probeset]:
try:
domain,call = string.split(domain_data,'|')
except Exception:
values = string.split(domain_data,'|')
domain = values[0]; call = values[-1] ### occurs when a | exists in the annotations from UniProt
try: domain_gene_count_db[domain].append(gene)
except KeyError: domain_gene_count_db[domain] = [gene]
try: gene_protein_ft_db[gene].append(domain)
except KeyError: gene_protein_ft_db[gene]=[domain]
elif proceed == 'yes':
for domain_data in probeset_aligning_db[probeset]:
domain,call = string.split(domain_data,'|')
new_domain_list.append((domain,call))
#new_domain_list = string.join(new_domain_list,', ')
gene_protein_ft_db[gene,original_probeset] = new_domain_list
domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
probeset_aligning_db=[] ### Clear memory
probeset_aligning_protein_db = importGeneric(filename)
probeset_pairs={} ### Store all possible probeset pairs as single probesets for protein-protein associations
for probeset in exon_db:
if len(probeset)==2:
for p in probeset: probeset_pairs[p] = probeset
if report_type == 'probeset':
### Below code was re-written to be more memory efficient by not storing all data in probeset-domain-annotations-*comp*.txt via generic import
protein_functional_attribute_db={}; probeset_protein_associations={}; protein_db={}
for probeset in exon_db:
initial_proceed = 'no'; original_probeset = probeset
if probeset in probeset_aligning_protein_db: initial_proceed = 'yes'
elif array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if '|' in probeset[0]: probeset1 = string.split(probeset[0],'|')[0]; probeset = probeset1,probeset[1]
try: alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id,probeset[1]
except Exception: null=[]
probeset_joined = string.join(probeset,'|')
#print [probeset_joined],[probeset]
if probeset_joined in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset_joined
elif probeset[0] in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset[0]
elif probeset[1] in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset[1]
#else: for i in probeset_aligning_db: print [i];kill
if initial_proceed == 'yes':
protein_data_list=probeset_aligning_protein_db[probeset]
new_protein_list = []
gene = exon_db[original_probeset].GeneID()
for protein_data in protein_data_list:
protein_info,call = string.split(protein_data,'|')
if 'AA:' in protein_info:
protein_info_r = string.replace(protein_info,')','*')
protein_info_r = string.replace(protein_info_r,'(','*')
protein_info_r = string.split(protein_info_r,'*')
null_protein = protein_info_r[1]; hit_protein = protein_info_r[3]
probeset_protein_associations[original_probeset] = null_protein,hit_protein,call
protein_db[null_protein] = []; protein_db[hit_protein] = []
new_protein_list.append((protein_info,call))
#new_protein_list = string.join(new_domain_list,', ')
protein_functional_attribute_db[gene,original_probeset] = new_protein_list
filename = 'AltDatabase/'+species+'/'+array_type+'/SEQUENCE-protein-dbase_'+comp_type+'.txt'
filename=getExonVersionFilename(filename)
protein_seq_db = importGenericFiltered(filename,protein_db)
for key in protein_functional_attribute_db:
gene,probeset = key
try:
null_protein,hit_protein,call = probeset_protein_associations[probeset]
null_seq = protein_seq_db[null_protein][0]; hit_seq = protein_seq_db[hit_protein][0]
seq_attr = 'sequence: ' +'('+null_protein+')'+null_seq +' -> '+'('+hit_protein+')'+hit_seq
protein_functional_attribute_db[key].append((seq_attr,call))
except KeyError: null=[]
protein_seq_db=[]; probeset_aligning_protein_db=[]
return gene_protein_ft_db,domain_gene_count_db,protein_functional_attribute_db
else:
probeset_aligning_protein_db=[]; len_gene_protein_ft_db = len(gene_protein_ft_db); gene_protein_ft_db=[]
return len_gene_protein_ft_db,domain_gene_count_db
class SimpleJunctionData:
def __init__(self, geneid, probeset1, probeset2, probeset1_display, critical_exon_list):
self._geneid = geneid; self._probeset1 = probeset1; self._probeset2 = probeset2
self._probeset1_display = probeset1_display; self._critical_exon_list = critical_exon_list
def GeneID(self): return self._geneid
def Probeset1(self): return self._probeset1
def Probeset2(self): return self._probeset2
def InclusionDisplay(self): return self._probeset1_display
def CriticalExons(self): return self._critical_exon_list
def setSplicingCall(self,splicing_call):
#self._splicing_call = EvidenceOfAltSplicing(slicing_annot)
self._splicing_call = splicing_call
def setSymbol(self,symbol): self.symbol = symbol
def Symbol(self): return self.symbol
def SplicingCall(self): return self._splicing_call
def setInclusionLookup(self,incl_junction_probeset): self.incl_junction_probeset = incl_junction_probeset
def InclusionLookup(self): return self.incl_junction_probeset
def formatJunctionData(probesets,affygene,critical_exon_list):
if '|' in probesets[0]: ### Only return the first inclusion probeset (agglomerated probesets)
incl_list = string.split(probesets[0],'|')
incl_probeset = incl_list[0]; excl_probeset = probesets[1]
else: incl_probeset = probesets[0]; excl_probeset = probesets[1]
jd = SimpleJunctionData(affygene,incl_probeset,excl_probeset,probesets[0],critical_exon_list)
key = incl_probeset,excl_probeset
return key,jd
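### Illustrative sketch (not part of the pipeline, never called): formatJunctionData above
### collapses agglomerated inclusion probesets of the form 'incl1|incl2|...' down to the first
### ID while keeping the full string for display. The hypothetical helper below mirrors just
### that collapsing step.
def _demo_collapse_agglomerated(probesets):
    incl_probeset, excl_probeset = probesets[0], probesets[1]
    if '|' in incl_probeset:
        incl_probeset = incl_probeset.split('|')[0] ### keep only the first inclusion probeset
    return incl_probeset, excl_probeset
### Example: _demo_collapse_agglomerated(('PS1|PS2|PS3','PS9')) returns ('PS1','PS9')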
class JunctionExpressionData:
def __init__(self, baseline_norm_exp, exper_norm_exp, pval, ped):
self.baseline_norm_exp = baseline_norm_exp; self.exper_norm_exp = exper_norm_exp; self.pval = pval; self.ped = ped
def ConNI(self):
ls=[]
for i in self.logConNI():
ls.append(math.pow(2,i))
return ls
def ExpNI(self):
ls=[]
for i in self.logExpNI():
ls.append(math.pow(2,i))
return ls
def ConNIAvg(self): return math.pow(2,statistics.avg(self.logConNI()))
def ExpNIAvg(self): return math.pow(2,statistics.avg(self.logExpNI()))
def logConNI(self): return self.baseline_norm_exp
def logExpNI(self): return self.exper_norm_exp
def Pval(self): return self.pval
def ProbesetExprData(self): return self.ped
    def __repr__(self): return str(self.ConNI())+'|'+str(self.ExpNI())
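### Illustrative sketch (not part of the pipeline, never called): ConNI()/ExpNI() above simply
### re-exponentiate the stored log2 normalized-intensity ratios. The hypothetical helper below
### shows that round trip with plain math calls.
def _demo_unlog_ratios(log2_ratios):
    import math ### local import keeps the sketch self-contained
    return [math.pow(2,x) for x in log2_ratios] ### e.g. [-1.0,0.0,1.0] -> [0.5,1.0,2.0]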
def calculateAllASPIREScores(p1,p2):
b1o = p1.ConNIAvg(); b2o = p2.ConNIAvg()
e1o = p1.ExpNIAvg(); e2o = p2.ExpNIAvg(); original_score = statistics.aspire_stringent(b1o,e1o,b2o,e2o)
index=0; baseline_scores=[] ### Loop through each control ratio and compare to control ratio mean
for b1 in p1.ConNI():
b2 = p2.ConNI()[index]
score = statistics.aspire_stringent(b2,e2o,b1,e1o); index+=1
baseline_scores.append(score)
index=0; exp_scores=[] ### Loop through each experimental ratio and compare to control ratio mean
for e1 in p1.ExpNI():
e2 = p2.ExpNI()[index]
score = statistics.aspire_stringent(b1o,e1,b2o,e2); index+=1
exp_scores.append(score)
try:
aspireP = statistics.runComparisonStatistic(baseline_scores,exp_scores,probability_statistic)
except Exception: aspireP = 'NA' ### Occurs when analyzing two groups with no variance
if aspireP == 1: aspireP = 'NA'
"""
if aspireP<0.05 and oscore>0.2 and statistics.avg(exp_scores)<0:
index=0
for e1 in p1.ExpNI():
e2 = p2.ExpNI()[index]
score = statistics.aspire_stringent(b1,e1,b2,e2)
print p1.ExpNI(), p2.ExpNI(); print e1, e2
print e1o,e2o; print b1, b2; print score, original_score
print exp_scores, statistics.avg(exp_scores); kill"""
return baseline_scores, exp_scores, aspireP
def stringListConvert(ls):
ls2=[]
for i in ls: ls2.append(str(i))
return ls2
def analyzeJunctionSplicing(nonlog_NI_db):
group_sizes = []; original_array_indices = permute_lists[0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices: group_sizes.append(len(group))
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db)>0:
temp_db={}
for probeset in nonlog_NI_db: temp_db[probeset]=[]
for probeset in temp_db:
try: filtered_probeset_db[probeset]
except KeyError: del nonlog_NI_db[probeset]
    ### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
global NIdata_export
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
NIdata_export = export.ExportFile(summary_output)
title = string.join(['inclusion-probeset','exclusion-probeset']+original_array_names,'\t')+'\n'; NIdata_export.write(title)
### Calculate a probeset p-value adjusted for constitutive expression levels (taken from splicing index method)
xl=0
probeset_normIntensity_db={}
for probeset in array_raw_group_values:
ed = exon_db[probeset]; geneid = ed.GeneID(); xl+=1
#if geneid in alt_junction_db and geneid in original_avg_const_exp_db: ### Don't want this filter since it causes problems for Trans-splicing
group_index = 0; si_interim_group_db={}; ge_threshold_count=0; value_count = 0
        ### Prepare normalized expression lists for reciprocal-junction algorithms
if geneid in avg_const_exp_db:
for group_values in array_raw_group_values[probeset]:
value_index = 0; ratio_hash=[]
                for value in group_values: ###Calculate normalized ratios for each condition and save raw values for later permutation
exp_val = value;ge_val = avg_const_exp_db[geneid][value_count]; exp_ratio = exp_val-ge_val
ratio_hash.append(exp_ratio); value_index +=1; value_count +=1
si_interim_group_db[group_index] = ratio_hash
group_index+=1
group1_ratios = si_interim_group_db[0]; group2_ratios = si_interim_group_db[1]
### Calculate and store simple expression summary stats
data_list1 = array_raw_group_values[probeset][0]; data_list2 = array_raw_group_values[probeset][1]
baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
#group_name1 = array_group_list[0]; group_name2 = array_group_list[1]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 'NA'
if ttest_exp_p == 1: ttest_exp_p = 'NA'
adj_fold = statistics.avg(group2_ratios) - statistics.avg(group1_ratios)
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, '')
try:
try:
normIntensityP = statistics.runComparisonStatistic(group1_ratios,group2_ratios,probability_statistic)
except Exception:
#print group1_ratios,group2_ratios,array_raw_group_values[probeset],avg_const_exp_db[geneid];kill
normIntensityP = 'NA' ###occurs for constitutive probesets
except Exception: normIntensityP = 0
if normIntensityP == 1: normIntensityP = 'NA'
ji = JunctionExpressionData(group1_ratios, group2_ratios, normIntensityP, ped)
probeset_normIntensity_db[probeset]=ji ### store and access this below
#if probeset == 'G6899622@J916374@j_at': print normIntensityP,group1_ratios,group2_ratios;kill
###Concatenate the two raw expression groups into a single list for permutation analysis
ls_concatenated = []
for group in array_raw_group_values[probeset]:
for entry in group: ls_concatenated.append(entry)
if analysis_method == 'linearregres': ###Convert out of log space
ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
s = 0; t = 0; y = ''; denominator_events=0; excluded_probeset_db = {}
splice_event_list=[]; splice_event_list_mx=[]; splice_event_list_non_mx=[]; event_mx_temp = []; permute_p_values={} #use this to exclude duplicate mx events
for affygene in alt_junction_db:
if affygene in original_avg_const_exp_db:
constit_exp1 = original_avg_const_exp_db[affygene][0]
constit_exp2 = original_avg_const_exp_db[affygene][1]
ge_fold=constit_exp2-constit_exp1
for event in alt_junction_db[affygene]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1]; exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene,exon_set1]
probeset2 = exon_dbase[affygene,exon_set2]
critical_exon_list = critical_exon_db[affygene,tuple(event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset(); probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction(); exon_set2 = event.ExclusionJunction()
try: novel_event = event.NovelEvent()
except Exception: novel_event = 'known'
critical_exon_list = [1,event.CriticalExonSets()]
key,jd = formatJunctionData([probeset1,probeset2],affygene,critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try: jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:null=[]
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
#print probeset1,probeset2, critical_exon_list,event_call,exon_set1,exon_set2;kill
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db:
denominator_events+=1
try: p1 = probeset_normIntensity_db[probeset1]; p2 = probeset_normIntensity_db[probeset2]
except Exception:
print probeset1, probeset2
p1 = probeset_normIntensity_db[probeset1]
p2 = probeset_normIntensity_db[probeset2]
#if '|' in probeset1: print
pp1 = p1.Pval(); pp2 = p2.Pval()
baseline_ratio1 = p1.ConNIAvg()
experimental_ratio1 = p1.ExpNIAvg()
baseline_ratio2 = p2.ConNIAvg()
experimental_ratio2 = p2.ExpNIAvg()
ped1 = p1.ProbesetExprData()
ped2 = p2.ProbesetExprData()
Rin = ''; Rex = ''
                    r = 0 ###Variable used to determine if we should take the absolute value of dI for mutually exclusive events
if event_call == 'ei-ex': #means probeset1 is an exon inclusion and probeset2 is an exon exclusion
Rin = baseline_ratio1/experimental_ratio1 # Rin=A/C
                        Rex = baseline_ratio2/experimental_ratio2 # Rex=B/D
I1=baseline_ratio1/(baseline_ratio1+baseline_ratio2)
I2=experimental_ratio1/(experimental_ratio1+experimental_ratio2)
                        ###When Rex is larger, the experimental ratio for exclusion is decreased relative to baseline.
                        ###Thus, inclusion is increased (when Rin is small, inclusion is high)
if (Rin>1 and Rex<1): y = 'downregulated'
elif (Rin<1 and Rex>1): y = 'upregulated'
elif (Rex<Rin): y = 'downregulated'
else: y = 'upregulated'
temp_list = []
if event_call == 'mx-mx':
temp_list.append(exon_set1); temp_list.append(exon_set2);temp_list.sort()
if (affygene,temp_list) not in event_mx_temp: #use this logic to prevent mx entries being added more than once
event_mx_temp.append((affygene,temp_list))
                            ###Arbitrarily choose which exon-set will be Rin or Rex; it doesn't matter for mutually exclusive events
Rin = baseline_ratio1/experimental_ratio1 # Rin=A/C
                            Rex = baseline_ratio2/experimental_ratio2 # Rex=B/D
I1=baseline_ratio1/(baseline_ratio1+baseline_ratio2)
I2=experimental_ratio1/(experimental_ratio1+experimental_ratio2)
y = 'mutually-exclusive'; r = 1
if analysis_method == 'ASPIRE' and Rex != '':
#if affygene == 'ENSMUSG00000000126': print Rin, Rex, probeset1, probeset2
if (Rin>1 and Rex<1) or (Rin<1 and Rex>1):
s +=1
in1=((Rex-1.0)*Rin)/(Rex-Rin); in2=(Rex-1.0)/(Rex-Rin)
                            dI = ((in2-in1)+(I2-I1))/2.0 #modified to give proper exon inclusion
dI = dI*(-1) ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
try: baseline_scores, exp_scores, aspireP = calculateAllASPIREScores(p1,p2)
except Exception: baseline_scores = [0]; exp_scores=[dI]; aspireP = 0
if export_NI_values == 'yes':
baseline_scores = stringListConvert(baseline_scores); exp_scores = stringListConvert(exp_scores)
ev = string.join([probeset1,probeset2]+baseline_scores+exp_scores,'\t')+'\n'; NIdata_export.write(ev)
if max_replicates >2 or equal_replicates==2:
permute_p_values[(probeset1,probeset2)] = [aspireP, 'NA', 'NA', 'NA']
if r == 1: dI = abs(dI) ###Occurs when event is mutually exclusive
#if abs(dI)>alt_exon_logfold_cutoff: print [dI],pp1,pp2,aspireP;kill
#print [affygene,dI,pp1,pp2,aspireP,event.CriticalExonSets(),probeset1,probeset2,alt_exon_logfold_cutoff,p_threshold]
if ((pp1<p_threshold or pp2<p_threshold) or pp1==1 or pp1=='NA') and abs(dI) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
ejd = ExonJunctionData(dI,probeset1,probeset2,pp1,pp2,y,event_call,critical_exon_list,affygene,ped1,ped2)
"""if probeset1 == 'ENSMUSG00000033335:E16.1-E17.1' and probeset2 == 'ENSMUSG00000033335:E16.1-E19.1':
print [dI,pp1,pp2,p_threshold,alt_exon_logfold_cutoff]
print baseline_scores, exp_scores, [aspireP]#;sys.exit()"""
ejd.setConstitutiveExpression(constit_exp1); ejd.setConstitutiveFold(ge_fold)
if perform_permutation_analysis == 'yes': splice_event_list.append((dI,ejd))
elif aspireP < permute_p_threshold or aspireP=='NA': splice_event_list.append((dI,ejd))
#if abs(dI)>.2: print probeset1, probeset2, critical_exon_list, [exon_set1], [exon_set2]
#if dI>.2 and aspireP<0.05: print baseline_scores,exp_scores,aspireP, statistics.avg(exp_scores), dI
elif array_type == 'junction' or array_type == 'RNASeq':
excluded_probeset_db[affygene+':'+event.CriticalExonSets()[0]] = probeset1, affygene, dI, 'NA', aspireP
if array_type == 'RNASeq':
try: ejd.setNovelEvent(novel_event)
except Exception: None
if analysis_method == 'linearregres' and Rex != '':
s+=1
log_fold,linregressP,rsqrd_status = getLinearRegressionScores(probeset1,probeset2,group_sizes)
                        log_fold = log_fold ### Unlike the ASPIRE branch above, no sign reversal is applied here
if max_replicates >2 or equal_replicates==2: permute_p_values[(probeset1,probeset2)] = [linregressP, 'NA', 'NA', 'NA']
if rsqrd_status == 'proceed':
if ((pp1<p_threshold or pp2<p_threshold) or pp1==1 or pp1=='NA') and abs(log_fold) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
ejd = ExonJunctionData(log_fold,probeset1,probeset2,pp1,pp2,y,event_call,critical_exon_list,affygene,ped1,ped2)
ejd.setConstitutiveExpression(constit_exp1); ejd.setConstitutiveFold(ge_fold)
if perform_permutation_analysis == 'yes': splice_event_list.append((log_fold,ejd))
elif linregressP < permute_p_threshold: splice_event_list.append((log_fold,ejd))
#if probeset1 == 'G6990053@762121_762232_at' and probeset2 == 'G6990053@J926254@j_at':
#print event_call, critical_exon_list,affygene, Rin, Rex, y, temp_list;kill
elif array_type == 'junction' or array_type == 'RNASeq':
excluded_probeset_db[affygene+':'+event.CriticalExonSets()[0]] = probeset1, affygene, log_fold, 'NA', linregressP
if array_type == 'RNASeq':
try: ejd.setNovelEvent(novel_event)
except Exception: None
else: t +=1
clearObjectsFromMemory(probeset_normIntensity_db)
probeset_normIntensity_db={}; ### Potentially large memory object containing summary stats for all probesets
statistics.adjustPermuteStats(permute_p_values)
summary_data_db['denominator_exp_events']=denominator_events
print "Number of exon-events analyzed:", s
print "Number of exon-events excluded:", t
return splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
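### Illustrative sketch (not part of the pipeline, never called): the ASPIRE 'ei-ex' branch of
### analyzeJunctionSplicing above derives a delta-inclusion (dI) score from the mean normalized
### intensity ratios of an inclusion/exclusion junction pair. The hypothetical helper below
### repeats the same Rin/Rex/I1/I2/in1/in2 arithmetic on four plain numbers so it can be
### inspected in isolation; it is only meaningful when Rin and Rex move in opposite directions
### (so Rex != Rin), which is the condition checked before the score is computed above.
def _demo_aspire_dI(baseline_ratio1,experimental_ratio1,baseline_ratio2,experimental_ratio2):
    Rin = baseline_ratio1/experimental_ratio1 ### inclusion junction: baseline/experimental
    Rex = baseline_ratio2/experimental_ratio2 ### exclusion junction: baseline/experimental
    I1 = baseline_ratio1/(baseline_ratio1+baseline_ratio2)
    I2 = experimental_ratio1/(experimental_ratio1+experimental_ratio2)
    in1 = ((Rex-1.0)*Rin)/(Rex-Rin); in2 = (Rex-1.0)/(Rex-Rin)
    dI = ((in2-in1)+(I2-I1))/2.0
    return dI*(-1) ### sign flipped to match the splicing-index/FIRMA orientation, as above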
def maxReplicates():
replicates=0; greater_than_two=0; greater_than_one=0; group_sizes=[]
for probeset in array_raw_group_values:
for group_values in array_raw_group_values[probeset]:
try:
replicates+=len(group_values); group_sizes.append(len(group_values))
if len(group_values)>2: greater_than_two+=1
elif len(group_values)>1: greater_than_one+=1
except Exception: replicates+=len(array_raw_group_values[probeset]); break
break
group_sizes = unique.unique(group_sizes)
if len(group_sizes) == 1: equal_replicates = group_sizes[0]
else: equal_replicates = 0
max_replicates = replicates/float(original_conditions)
if max_replicates<2.01:
if greater_than_two>0 and greater_than_one>0: max_replicates=3
return max_replicates, equal_replicates
def furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values):
splice_event_list.sort(); splice_event_list.reverse()
print "filtered %s scores:" % analysis_method, len(splice_event_list)
if perform_permutation_analysis == 'yes':
###*********BEGIN PERMUTATION ANALYSIS*********
if max_replicates >2 or equal_replicates==2:
splice_event_list, p_value_call, permute_p_values = permuteSplicingScores(splice_event_list)
else:
print "WARNING...Not enough replicates to perform permutation analysis."
p_value_call=''; permute_p_values = {}
else:
if max_replicates >2 or equal_replicates==2:
if probability_statistic == 'unpaired t-test':
p_value_call=analysis_method+'-OneWayAnova'
else:
p_value_call=analysis_method+'-'+probability_statistic
else:
if probability_statistic == 'unpaired t-test':
p_value_call='OneWayAnova'; permute_p_values = {}
else:
p_value_call=probability_statistic; permute_p_values = {}
print len(splice_event_list), 'alternative events after subsequent filtering (optional)'
    ### Get ExonJunction annotations
junction_splicing_annot_db = getJunctionSplicingAnnotations(probeset_comp_db)
regulated_exon_junction_db={}; new_splice_event_list=[]
if filter_for_AS == 'yes': print "Filtering for evidence of Alternative Splicing"
for (fold,ejd) in splice_event_list:
proceed = 'no'
if filter_for_AS == 'yes':
try:
ja = junction_splicing_annot_db[ejd.Probeset1(),ejd.Probeset2()]; splicing_call = ja.SplicingCall()
if splicing_call == 1: proceed = 'yes'
except KeyError: proceed = 'no'
else: proceed = 'yes'
if proceed == 'yes':
key,jd = formatJunctionData([ejd.Probeset1(),ejd.Probeset2()],ejd.GeneID(),ejd.CriticalExons())
regulated_exon_junction_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
new_splice_event_list.append((fold,ejd))
### Add junction probeset lookup for reciprocal junctions composed of an exonid (not in protein database currently)
if array_type == 'RNASeq' and '-' not in key[0]: ### Thus, it is an exon compared to a junction
events = alt_junction_db[ejd.GeneID()]
for ji in events:
if (ji.InclusionProbeset(),ji.ExclusionProbeset()) == key:
jd.setInclusionLookup(ji.InclusionLookup()) ### This is the source junction from which the exon ID comes from
probeset_comp_db[ji.InclusionLookup(),ji.ExclusionProbeset()]=jd
#print ji.InclusionProbeset(),ji.ExclusionProbeset(),' ',ji.InclusionLookup()
if filter_for_AS == 'yes': print len(new_splice_event_list), "remaining after filtering for evidence of Alternative splicing"
filtered_exon_db = {}
for junctions in probeset_comp_db:
rj = probeset_comp_db[junctions] ### Add splicing annotations to the AltMouse junction DBs (needed for permutation analysis statistics and filtering)
try: ja = junction_splicing_annot_db[junctions]; splicing_call = ja.SplicingCall(); rj.setSplicingCall(ja.SplicingCall())
except KeyError: rj.setSplicingCall(0)
if filter_for_AS == 'yes': filtered_exon_db[junctions] = rj
for junctions in regulated_exon_junction_db:
rj = regulated_exon_junction_db[junctions]
try: ja = junction_splicing_annot_db[junctions]; rj.setSplicingCall(ja.SplicingCall())
except KeyError: rj.setSplicingCall(0)
if filter_for_AS == 'yes': probeset_comp_db = filtered_exon_db
try: clearObjectsFromMemory(alt_junction_db)
except Exception: null=[]
return new_splice_event_list, p_value_call, permute_p_values, probeset_comp_db, regulated_exon_junction_db
class SplicingScoreData:
def Method(self):
###e.g. ASPIRE
return self._method
def Score(self): return str(self._score)
def Probeset1(self): return self._probeset1
def Probeset2(self): return self._probeset2
def RegulationCall(self): return self._regulation_call
def GeneID(self): return self._geneid
def CriticalExons(self): return self._critical_exon_list[1]
def CriticalExonTuple(self): return self._critical_exon_list
def TTestNormalizedRatios(self): return self._normIntensityP
def TTestNormalizedRatios2(self): return self._normIntensityP2
def setConstitutiveFold(self,exp_log_ratio): self._exp_log_ratio = exp_log_ratio
def ConstitutiveFold(self): return str(self._exp_log_ratio)
def setConstitutiveExpression(self,const_baseline): self.const_baseline = const_baseline
def ConstitutiveExpression(self): return str(self.const_baseline)
def setProbesetExpressionData(self,ped): self.ped1 = ped
def ProbesetExprData1(self): return self.ped1
def ProbesetExprData2(self): return self.ped2
def setNovelEvent(self,novel_event): self._novel_event = novel_event
def NovelEvent(self): return self._novel_event
def EventCall(self):
###e.g. Exon inclusion (ei) Exon exclusion (ex), ei-ex, reported in that direction
return self._event_call
def Report(self):
output = self.Method() +'|'+ self.GeneID() +'|'+ string.join(self.CriticalExons(),'|')
return output
def __repr__(self): return self.Report()
class ExonJunctionData(SplicingScoreData):
def __init__(self,score,probeset1,probeset2,probeset1_p,probeset2_p,regulation_call,event_call,critical_exon_list,affygene,ped1,ped2):
self._score = score; self._probeset1 = probeset1; self._probeset2 = probeset2; self._regulation_call = regulation_call
self._event_call = event_call; self._critical_exon_list = critical_exon_list; self._geneid = affygene
self._method = analysis_method; self._normIntensityP = probeset1_p; self._normIntensityP2 = probeset2_p
self.ped1 = ped1; self.ped2=ped2
class ExonData(SplicingScoreData):
def __init__(self,splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean):
self._score = splicing_index; self._probeset1 = probeset; self._opposite_SI_log_mean = opposite_SI_log_mean
self._critical_exon_list = critical_exon_list; self._geneid = geneid
self._baseline_ratio1 = group1_ratios; self._experimental_ratio1 = group2_ratios
self._normIntensityP = normIntensityP
self._method = analysis_method; self._event_call = 'exon-inclusion'
if splicing_index > 0: regulation_call = 'downregulated' ###Since baseline is the numerator ratio
else: regulation_call = 'upregulated'
self._regulation_call = regulation_call
def OppositeSIRatios(self): return self._opposite_SI_log_mean
class ExcludedExonData(ExonData):
def __init__(self,splicing_index,geneid,normIntensityP):
self._score = splicing_index; self._geneid = geneid; self._normIntensityP = normIntensityP
def getAllPossibleLinearRegressionScores(probeset1,probeset2,positions,group_sizes):
    ### Get raw expression values for the two probesets
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
all_possible_scores=[]; index1=0 ### Perform all possible pairwise comparisons between groups (not sure how this will work for 10+ groups)
for (pos1a,pos2a) in positions:
index2=0
for (pos1b,pos2b) in positions:
if pos1a != pos1b:
p1_g1 = p1_exp[pos1a:pos2a]; p1_g2 = p1_exp[pos1b:pos2b]
p2_g1 = p2_exp[pos1a:pos2a]; p2_g2 = p2_exp[pos1b:pos2b]
#log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,len(group_sizes)) ### Used to calculate a pairwise group pvalue
log_fold, rsqrd = performLinearRegression(p1_g1,p2_g1,p1_g2,p2_g2)
if log_fold<0: i1,i2 = index2,index1 ### all scores should indicate upregulation
else: i1,i2=index1,index2
all_possible_scores.append((abs(log_fold),i1,i2))
index2+=1
index1+=1
all_possible_scores.sort()
try: log_fold,index1,index2 = all_possible_scores[-1]
except Exception: log_fold=0; index1=0; index2=0
return log_fold, index1, index2
def getLinearRegressionScores(probeset1,probeset2,group_sizes):
    ### Get raw expression values for the two probesets
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
try:
p1_g1 = p1_exp[:group_sizes[0]]; p1_g2 = p1_exp[group_sizes[0]:]
p2_g1 = p2_exp[:group_sizes[0]]; p2_g2 = p2_exp[group_sizes[0]:]
except Exception:
print probeset1,probeset2
print p1_exp
print p2_exp
print group_sizes
force_kill
log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,2)
return log_fold, linregressP, rsqrd
def getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,groups):
log_fold, rsqrd = performLinearRegression(p1_g1,p2_g1,p1_g2,p2_g2)
try:
### Repeat for each sample versus baselines to calculate a p-value
index=0; group1_scores=[]
for p1_g1_sample in p1_g1:
p2_g1_sample = p2_g1[index]
log_f, rs = performLinearRegression(p1_g1,p2_g1,[p1_g1_sample],[p2_g1_sample])
group1_scores.append(log_f); index+=1
index=0; group2_scores=[]
for p1_g2_sample in p1_g2:
p2_g2_sample = p2_g2[index]
log_f, rs = performLinearRegression(p1_g1,p2_g1,[p1_g2_sample],[p2_g2_sample])
group2_scores.append(log_f); index+=1
try:
linregressP = statistics.runComparisonStatistic(group1_scores,group2_scores,probability_statistic)
except Exception:
linregressP = 0; group1_scores = [0]; group2_scores = [log_fold]
if linregressP == 1: linregressP = 0
except Exception:
linregressP = 0; group1_scores = [0]; group2_scores = [log_fold]
if export_NI_values == 'yes' and groups==2:
group1_scores = stringListConvert(group1_scores)
group2_scores = stringListConvert(group2_scores)
ev = string.join([probeset1,probeset2]+group1_scores+group2_scores,'\t')+'\n'; NIdata_export.write(ev)
return log_fold, linregressP, rsqrd
def performLinearRegression(p1_g1,p2_g1,p1_g2,p2_g2):
return_rsqrd = 'no'
if use_R == 'yes': ###Uses the RLM algorithm
#print "Performing Linear Regression analysis using rlm."
g1_slope = statistics.LinearRegression(p1_g1,p2_g1,return_rsqrd)
g2_slope = statistics.LinearRegression(p1_g2,p2_g2,return_rsqrd)
    else: ###Uses a basic least-squares method
#print "Performing Linear Regression analysis using python specific methods."
g1_slope = statistics.simpleLinRegress(p1_g1,p2_g1)
g2_slope = statistics.simpleLinRegress(p1_g2,p2_g2)
log_fold = statistics.convert_to_log_fold(g2_slope/g1_slope)
rsqrd = 'proceed'
#if g1_rsqrd > 0 and g2_rsqrd > 0: rsqrd = 'proceed'
#else: rsqrd = 'hault'
return log_fold, rsqrd
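### Illustrative sketch (not part of the pipeline, never called): performLinearRegression above
### scores a junction pair by fitting probeset2 expression against probeset1 expression in each
### group and taking the log of the slope ratio. The hypothetical helper below shows the same
### idea with an ordinary least-squares slope and an explicit log2; the real
### statistics.LinearRegression/simpleLinRegress and convert_to_log_fold calls may differ in
### detail (e.g. robust rlm fitting, log base), so treat this purely as a conceptual stand-in.
def _demo_slope_ratio_logfold(p1_g1,p2_g1,p1_g2,p2_g2):
    import math
    def _ols_slope(x,y):
        n = float(len(x)); mx = sum(x)/n; my = sum(y)/n
        num = sum((xi-mx)*(yi-my) for xi,yi in zip(x,y))
        den = sum((xi-mx)**2 for xi in x)
        return num/den
    g1_slope = _ols_slope(p1_g1,p2_g1); g2_slope = _ols_slope(p1_g2,p2_g2)
    return math.log(g2_slope/g1_slope,2) ### assumes both slopes are positive, as expected for expression data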
########### Permutation Analysis Functions ###########
def permuteLinearRegression(probeset1,probeset2,p):
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
p1_g1, p1_g2 = permute_samples(p1_exp,p)
p2_g1, p2_g2 = permute_samples(p2_exp,p)
return_rsqrd = 'no'
if use_R == 'yes': ###Uses the RLM algorithm
g1_slope = statistics.LinearRegression(p1_g1,p2_g1,return_rsqrd)
g2_slope = statistics.LinearRegression(p1_g2,p2_g2,return_rsqrd)
    else: ###Uses a basic least-squares method
g1_slope = statistics.simpleLinRegress(p1_g1,p2_g1)
g2_slope = statistics.simpleLinRegress(p1_g2,p2_g2)
log_fold = statistics.convert_to_log_fold(g2_slope/g1_slope)
return log_fold
def permuteSplicingScores(splice_event_list):
p_value_call = 'lowest_raw_p'
permute_p_values = {}; splice_event_list2=[]
if len(permute_lists) > 0:
#tuple_data in splice_event_list = dI,probeset1,probeset2,y,event_call,critical_exon_list
all_samples = []; a = 0
for (score,x) in splice_event_list:
            ###NOTE: This reference dI differs slightly from the one calculated below, since the values are calculated from raw relative ratios rather than the averages
###Solution: Use the first calculated dI as the reference
score = score*(-1) ### Reverse the score to make equivalent to splicing-index and FIRMA scores
ref_splice_val = score; probeset1 = x.Probeset1(); probeset2 = x.Probeset2(); affygene = x.GeneID()
y = 0; p_splice_val_dist = []; count = 0; return_rsqrd = 'no'
for p in permute_lists: ###There are two lists in each entry
count += 1
permute = 'yes'
if analysis_method == 'ASPIRE':
p_splice_val = permute_ASPIRE_filtered(affygene, probeset1,probeset2,p,y,ref_splice_val,x)
elif analysis_method == 'linearregres':
slope_ratio = permuteLinearRegression(probeset1,probeset2,p)
p_splice_val = slope_ratio
if p_splice_val != 'null': p_splice_val_dist.append(p_splice_val)
y+=1
p_splice_val_dist.sort()
new_ref_splice_val = str(abs(ref_splice_val)); new_ref_splice_val = float(new_ref_splice_val[0:8]) #otherwise won't match up the scores correctly
if analysis_method == 'linearregres':
if ref_splice_val<0:
p_splice_val_dist2=[]
for val in p_splice_val_dist: p_splice_val_dist2.append(-1*val)
p_splice_val_dist=p_splice_val_dist2; p_splice_val_dist.reverse()
p_val, pos_permute, total_permute, greater_than_true_permute = statistics.permute_p(p_splice_val_dist,new_ref_splice_val,len(permute_lists))
#print p_val,ref_splice_val, pos_permute, total_permute, greater_than_true_permute,p_splice_val_dist[-3:];kill
###When two groups are of equal size, there will be 2 pos_permutes rather than 1
if len(permute_lists[0][0]) == len(permute_lists[0][1]): greater_than_true_permute = (pos_permute/2) - 1 #size of the two groups are equal
else:greater_than_true_permute = (pos_permute) - 1
if analysis_method == 'linearregres': greater_than_true_permute = (pos_permute) - 1 ###since this is a one sided test, unlike ASPIRE
###Below equation is fine if the population is large
permute_p_values[(probeset1,probeset2)] = [p_val, pos_permute, total_permute, greater_than_true_permute]
###Remove non-significant linear regression results
if analysis_method == 'linearregres':
if p_val <= permute_p_threshold or greater_than_true_permute < 2: splice_event_list2.append((score,x)) ###<= since many p=0.05
print "Number of permutation p filtered splice event:",len(splice_event_list2)
if len(permute_p_values)>0: p_value_call = 'permuted_aspire_p-value'
if analysis_method == 'linearregres': splice_event_list = splice_event_list2
return splice_event_list, p_value_call, permute_p_values
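### Illustrative sketch (not part of the pipeline, never called): permuteSplicingScores above
### relies on statistics.permute_p to rank the observed score within the permuted score
### distribution. The hypothetical helper below shows one common way such a one-sided
### permutation p-value can be computed (fraction of permuted scores at least as extreme as
### the observed score); the actual statistics.permute_p implementation may differ.
def _demo_permutation_pvalue(permuted_scores,observed_score):
    at_least_as_extreme = len([s for s in permuted_scores if abs(s) >= abs(observed_score)])
    return float(at_least_as_extreme)/len(permuted_scores)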
def permute_ASPIRE_filtered(affygene,probeset1,probeset2,p,y,ref_splice_val,x):
### Get raw expression values for each permuted group for the two probesets
b1,e1 = permute_dI(array_raw_group_values[probeset1],p)
try: b2,e2 = permute_dI(array_raw_group_values[probeset2],p)
except IndexError: print probeset2, array_raw_group_values[probeset2],p; kill
### Get the average constitutive expression values (averaged per-sample across probesets) for each permuted group
try: bc,ec = permute_dI(avg_const_exp_db[affygene],p)
except IndexError: print affygene, avg_const_exp_db[affygene],p; kill
if factor_out_expression_changes == 'no':
ec = bc
    ### Analyze the averaged ratios of junction expression relative to permuted constitutive expression
    try: p_splice_val = abs(statistics.aspire_stringent(b1/bc,e1/ec,b2/bc,e2/ec)) ### This is the permuted ASPIRE score
except Exception: p_splice_val = 0
#print p_splice_val, ref_splice_val, probeset1, probeset2, affygene; dog
if y == 0: ###The first permutation is always the real one
        ### Grab the absolute value with a small number of decimal places
try:
new_ref_splice_val = str(p_splice_val); new_ref_splice_val = float(new_ref_splice_val[0:8])
ref_splice_val = str(abs(ref_splice_val)); ref_splice_val = float(ref_splice_val[0:8]); y += 1
except ValueError:
###Only get this error if your ref_splice_val is a null
print y, probeset1, probeset2; print ref_splice_val, new_ref_splice_val, p
print b1/bc,e1/ec,b2/bc,e2/ec; print (b1/bc)/(e1/ec), (b2/bc)/(e2/ec)
print x[7],x[8],x[9],x[10]; kill
return p_splice_val
def permute_samples(a,p):
baseline = []; experimental = []
for p_index in p[0]:
baseline.append(a[p_index]) ###Append expression values for each permuted list
for p_index in p[1]:
experimental.append(a[p_index])
return baseline, experimental
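### Illustrative sketch (not part of the pipeline, never called): permute_samples above splits a
### flat per-sample expression list into permuted 'baseline' and 'experimental' groups using two
### lists of sample indices. The hypothetical helper below shows the same index-driven split.
def _demo_permute_split(values,index_pair):
    baseline = [values[i] for i in index_pair[0]]
    experimental = [values[i] for i in index_pair[1]]
    return baseline, experimental
### Example: _demo_permute_split([5.0,6.0,7.0,8.0],([0,2],[1,3])) returns ([5.0,7.0],[6.0,8.0])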
def permute_dI(all_samples,p):
baseline, experimental = permute_samples(all_samples,p)
#if get_non_log_avg == 'no':
gb = statistics.avg(baseline); ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
gb = statistics.log_fold_conversion_fraction(gb); ge = statistics.log_fold_conversion_fraction(ge)
#else:
#baseline = statistics.log_fold_conversion_fraction(baseline); experimental = statistics.log_fold_conversion_fraction(experimental)
#gb = statistics.avg(baseline); ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
return gb,ge
def format_exon_functional_attributes(affygene,critical_probeset_list,functional_attribute_db,up_exon_list,down_exon_list,protein_length_list):
### Add functional attributes
functional_attribute_list2=[]
new_functional_attribute_str=''
new_seq_attribute_str=''
new_functional_attribute_list=[]
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null': critical_probesets = critical_probeset_list[0]
else: critical_probesets = tuple(critical_probeset_list)
key = affygene,critical_probesets
if key in functional_attribute_db:
###Grab exon IDs corresponding to the critical probesets
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
try: critical_exons = regulated_exon_junction_db[critical_probesets].CriticalExons() ###For junction arrays
except Exception: print key, functional_attribute_db[key];kill
else: critical_exons = [exon_db[critical_probesets].ExonID()] ###For exon arrays
for exon in critical_exons:
for entry in functional_attribute_db[key]:
x = 0
functional_attribute = entry[0]
call = entry[1] # +, -, or ~
if ('AA:' in functional_attribute) or ('ref' in functional_attribute):
x = 1
if exon in up_exon_list:
### design logic to determine whether up or down regulation promotes the functional change (e.g. NMD)
if 'ref' in functional_attribute:
new_functional_attribute = '(~)'+functional_attribute
data_tuple = new_functional_attribute,exon
elif call == '+' or call == '~':
new_functional_attribute = '(+)'+functional_attribute
data_tuple = new_functional_attribute,exon
elif call == '-':
new_functional_attribute = '(-)'+functional_attribute
data_tuple = new_functional_attribute,exon
if 'AA:' in functional_attribute and '?' not in functional_attribute:
functional_attribute_temp = functional_attribute[3:]
if call == '+' or call == '~':
val1,val2 = string.split(functional_attribute_temp,'->')
else:
val2,val1 = string.split(functional_attribute_temp,'->')
val1,null = string.split(val1,'(')
val2,null = string.split(val2,'(')
protein_length_list.append([val1,val2])
elif exon in down_exon_list:
if 'ref' in functional_attribute:
new_functional_attribute = '(~)'+functional_attribute
data_tuple = new_functional_attribute,exon
elif call == '+' or call == '~':
new_functional_attribute = '(-)'+functional_attribute
data_tuple = new_functional_attribute,exon
elif call == '-':
new_functional_attribute = '(+)'+functional_attribute
data_tuple = new_functional_attribute,exon
if 'AA:' in functional_attribute and '?' not in functional_attribute:
functional_attribute_temp = functional_attribute[3:]
if call == '+' or call == '~':
val2,val1 = string.split(functional_attribute_temp,'->')
else:
val1,val2 = string.split(functional_attribute_temp,'->')
val1,null = string.split(val1,'(')
val2,null = string.split(val2,'(')
protein_length_list.append([val1,val2])
if x == 0 or (exclude_protein_details != 'yes'):
try: new_functional_attribute_list.append(new_functional_attribute)
except UnboundLocalError:
print entry
print up_exon_list,down_exon_list
print exon, critical_exons
print critical_probesets, (key, affygene,critical_probesets)
for i in functional_attribute_db:
print i, functional_attribute_db[i]; kill
###remove protein sequence prediction_data
if 'sequence' not in data_tuple[0]:
if x == 0 or exclude_protein_details == 'no':
functional_attribute_list2.append(data_tuple)
###Get rid of duplicates, but maintain non-alphabetical order
new_functional_attribute_list2=[]
for entry in new_functional_attribute_list:
if entry not in new_functional_attribute_list2:
new_functional_attribute_list2.append(entry)
new_functional_attribute_list = new_functional_attribute_list2
#new_functional_attribute_list = unique.unique(new_functional_attribute_list)
#new_functional_attribute_list.sort()
for entry in new_functional_attribute_list:
if 'sequence' in entry: new_seq_attribute_str = new_seq_attribute_str + entry + ','
else: new_functional_attribute_str = new_functional_attribute_str + entry + ','
new_seq_attribute_str = new_seq_attribute_str[0:-1]
new_functional_attribute_str = new_functional_attribute_str[0:-1]
return new_functional_attribute_str, functional_attribute_list2, new_seq_attribute_str,protein_length_list
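### Illustrative sketch (not part of the pipeline, never called): the 'AA:' branch above pulls
### the two protein lengths out of attribute strings that appear to look like
### 'AA:188(protein1)->349(protein2)' by splitting on '->' and trimming at '('. The exact
### attribute format is inferred from that split logic, so treat it as an assumption; the
### hypothetical helper below mirrors the parse for the simple up-regulated ('+') case only.
def _demo_parse_AA_lengths(functional_attribute):
    lengths = functional_attribute[3:] ### drop the leading 'AA:'
    val1,val2 = lengths.split('->')
    val1 = val1.split('(')[0]; val2 = val2.split('(')[0]
    return val1,val2 ### e.g. ('188','349')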
def grab_summary_dataset_annotations(functional_attribute_db,comparison_db,include_truncation_results_specifically):
    ###If a second filtering database is present, filter the first database based on protein length changes
fa_db={}; cp_db={} ###index the geneids for efficient recall in the next segment of code
for (affygene,annotation) in functional_attribute_db:
try: fa_db[affygene].append(annotation)
except KeyError: fa_db[affygene]= [annotation]
for (affygene,annotation) in comparison_db:
try: cp_db[affygene].append(annotation)
except KeyError: cp_db[affygene]= [annotation]
functional_attribute_db_exclude = {}
for affygene in fa_db:
if affygene in cp_db:
for annotation2 in cp_db[affygene]:
if ('trunc' in annotation2) or ('frag' in annotation2) or ('NMDs' in annotation2):
try: functional_attribute_db_exclude[affygene].append(annotation2)
except KeyError: functional_attribute_db_exclude[affygene] = [annotation2]
functional_annotation_db = {}
for (affygene,annotation) in functional_attribute_db:
### if we wish to filter the 1st database based on protein length changes
if affygene not in functional_attribute_db_exclude:
try: functional_annotation_db[annotation] += 1
except KeyError: functional_annotation_db[annotation] = 1
elif include_truncation_results_specifically == 'yes':
for annotation_val in functional_attribute_db_exclude[affygene]:
try: functional_annotation_db[annotation_val] += 1
except KeyError: functional_annotation_db[annotation_val] = 1
annotation_list = []
annotation_list_ranked = []
for annotation in functional_annotation_db:
if 'micro' not in annotation:
count = functional_annotation_db[annotation]
annotation_list.append((annotation,count))
annotation_list_ranked.append((count,annotation))
annotation_list_ranked.sort(); annotation_list_ranked.reverse()
return annotation_list, annotation_list_ranked
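### Illustrative sketch (not part of the pipeline, never called): grab_summary_dataset_annotations
### above tallies how often each functional annotation occurs while skipping genes that carry a
### truncation/fragment/NMD annotation in the comparison database (unless those are explicitly
### included). The hypothetical helper below shows only the core tally step.
def _demo_count_annotations(gene_annotation_pairs,excluded_genes):
    counts = {}
    for gene,annotation in gene_annotation_pairs:
        if gene not in excluded_genes:
            counts[annotation] = counts.get(annotation,0) + 1
    return counts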
def reorganize_attribute_entries(attribute_db1,build_attribute_direction_databases):
attribute_db2 = {}; inclusion_attributes_hit_count={}; exclusion_attributes_hit_count={}
genes_with_inclusion_attributes={}; genes_with_exclusion_attributes={};
###This database has unique gene, attribute information. No attribute will now be represented more than once per gene
for key in attribute_db1:
###Make gene the key and attribute (functional elements or protein information), along with the associated exons the values
affygene = key[0];exon_attribute = key[1];exon_list = attribute_db1[key]
exon_list = unique.unique(exon_list);exon_list.sort()
attribute_exon_info = exon_attribute,exon_list #e.g. 5'UTR, [E1,E2,E3]
try: attribute_db2[affygene].append(attribute_exon_info)
except KeyError: attribute_db2[affygene] = [attribute_exon_info]
###Separate out attribute data by direction for over-representation analysis
if build_attribute_direction_databases == 'yes':
direction=exon_attribute[1:2];unique_gene_attribute=exon_attribute[3:]
if direction == '+':
try: inclusion_attributes_hit_count[unique_gene_attribute].append(affygene)
except KeyError: inclusion_attributes_hit_count[unique_gene_attribute] = [affygene]
genes_with_inclusion_attributes[affygene]=[]
if direction == '-':
try: exclusion_attributes_hit_count[unique_gene_attribute].append(affygene)
except KeyError: exclusion_attributes_hit_count[unique_gene_attribute] = [affygene]
genes_with_exclusion_attributes[affygene]=[]
inclusion_attributes_hit_count = eliminate_redundant_dict_values(inclusion_attributes_hit_count)
exclusion_attributes_hit_count = eliminate_redundant_dict_values(exclusion_attributes_hit_count)
"""for key in inclusion_attributes_hit_count:
inclusion_attributes_hit_count[key] = len(inclusion_attributes_hit_count[key])
for key in exclusion_attributes_hit_count:
exclusion_attributes_hit_count[key] = len(exclusion_attributes_hit_count[key])"""
if build_attribute_direction_databases == 'yes': return attribute_db2,inclusion_attributes_hit_count,genes_with_inclusion_attributes,exclusion_attributes_hit_count,genes_with_exclusion_attributes
else: return attribute_db2
########### Misc. Functions ###########
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
        values = unique.unique(database[key])
        values.sort()
        db1[key] = values
return db1
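### Illustrative sketch (not part of the pipeline, never called): eliminate_redundant_dict_values
### above deduplicates and sorts each value list with the project's unique.unique helper. The
### hypothetical helper below shows the same effect with plain built-ins, assuming the values
### are hashable (true for the probeset/exon ID strings stored here).
def _demo_dedupe_dict(database):
    return dict((key,sorted(set(vals))) for key,vals in database.items())
### Example: _demo_dedupe_dict({'g1': ['E2','E1','E2']}) returns {'g1': ['E1','E2']}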
def add_a_space(string):
if len(string)<1:
string = ' '
return string
def convertToLog2(data_list):
return map(lambda x: math.log(float(x), 2), data_list)
def addGlobalFudgeFactor(data_list,data_type):
new_list = []
if data_type == 'log':
for item in data_list:
new_item = statistics.log_fold_conversion_fraction(item)
new_list.append(float(new_item) + global_addition_factor)
new_list = convertToLog2(new_list)
else:
for item in data_list: new_list.append(float(item) + global_addition_factor)
return new_list
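### Illustrative sketch (not part of the pipeline, never called): addGlobalFudgeFactor above moves
### log-scale values into non-log space, adds a constant offset, and re-logs them with
### convertToLog2. The hypothetical helper below shows that round trip with plain math, assuming
### log2 input values, a non-negative offset, and that statistics.log_fold_conversion_fraction
### corresponds to 2**x (the last point is inferred, not confirmed by this file).
def _demo_add_offset_in_linear_space(log2_values,offset):
    import math
    return [math.log(math.pow(2,v)+offset,2) for v in log2_values]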
def copyDirectoryPDFs(root_dir,AS='AS'):
directories = ['AltResults/AlternativeOutputDirectoryDescription.pdf',
'AltResultsDirectoryDescription.pdf',
'ClusteringDirectoryDescription.pdf',
'ExpressionInputDirectoryDescription.pdf',
'ExpressionOutputDirectoryDescription.pdf',
'GO-Elite/GO-Elite_resultsDirectoryDescription.pdf',
'GO-EliteDirectoryDescription.pdf',
'RootDirectoryDescription.pdf']
import shutil
for dir in directories:
file = string.split(dir,'/')[-1]
proceed=True
if 'AltResult' in dir and AS!='AS': proceed=False
if proceed:
try: shutil.copyfile(filepath('Documentation/DirectoryDescription/'+file), filepath(root_dir+dir))
except Exception: pass
def restrictProbesets(dataset_name):
### Take a file with probesets and only perform the splicing-analysis on these (e.g. those already identified from a previous run with a specific pattern)
    ### Allows for a proper denominator when calculating z-scores for microRNA and protein-domain ORA
probeset_list_filename = import_dir = '/AltDatabaseNoVersion/filtering'; filtered_probeset_db={}
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
try:
dir_list = read_directory(import_dir)
fn_dir = filepath(import_dir[1:])
except Exception: dir_list=[]; fn_dir=''
if len(dir_list)>0:
for file in dir_list:
if file[:-4] in dataset_name:
fn = fn_dir+'/'+file; fn = string.replace(fn,'AltDatabase','AltDatabaseNoVersion')
filtered_probeset_db = importGeneric(fn)
print len(filtered_probeset_db), id_name,"will be used to restrict analysis..."
return filtered_probeset_db
def RunAltAnalyze():
#print altanalyze_files
#print '!!!!!starting to run alt-exon analysis'
#returnLargeGlobalVars()
global annotate_db; annotate_db={}; global splice_event_list; splice_event_list=[]; residuals_dirlist=[]
global dataset_name; global constitutive_probeset_db; global exon_db; dir_list2=[]; import_dir2=''
if array_type == 'AltMouse': import_dir = root_dir+'AltExpression/'+array_type
elif array_type == 'exon':
import_dir = root_dir+'AltExpression/ExonArray/'+species+'/'
elif array_type == 'gene':
import_dir = root_dir+'AltExpression/GeneArray/'+species+'/'
elif array_type == 'junction':
import_dir = root_dir+'AltExpression/JunctionArray/'+species+'/'
else:
import_dir = root_dir+'AltExpression/'+array_type+'/'+species+'/'
#if analysis_method == 'ASPIRE' or analysis_method == 'linearregres' or analysis_method == 'splicing-index':
if array_type != 'AltMouse': gene_annotation_file = "AltDatabase/ensembl/"+species+"/"+species+"_Ensembl-annotations.txt"
else: gene_annotation_file = "AltDatabase/"+species+"/"+array_type+"/"+array_type+"_gene_annotations.txt"
annotate_db = ExonAnalyze_module.import_annotations(gene_annotation_file,array_type)
###Import probe-level associations
exon_db={}; filtered_arrayids={};filter_status='no'
try: constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
except IOError:
print_out = 'The annotation database: \n'+probeset_annotations_file+'\nwas not found. Ensure this file was not deleted and that the correct species has been selected.'
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
print traceback.format_exc()
badExit()
run=0
### Occurs when analyzing multiple conditions rather than performing a simple pair-wise comparison
if run_from_scratch == 'Annotate External Results': import_dir = root_dir
elif analyze_all_conditions == 'all groups':
import_dir = string.replace(import_dir,'AltExpression','AltExpression/FullDatasets')
if array_type == 'AltMouse':
import_dir = string.replace(import_dir,'FullDatasets/AltMouse','FullDatasets/AltMouse/Mm')
elif analyze_all_conditions == 'both':
import_dir2 = string.replace(import_dir,'AltExpression','AltExpression/FullDatasets')
if array_type == 'AltMouse':
import_dir2 = string.replace(import_dir2,'FullDatasets/AltMouse','FullDatasets/AltMouse/Mm')
try: dir_list2 = read_directory(import_dir2) #send a sub_directory to a function to identify all files in a directory
except Exception:
try:
if array_type == 'exon': array_type_dir = 'ExonArray'
elif array_type == 'gene': array_type_dir = 'GeneArray'
                elif array_type == 'junction': array_type_dir = 'JunctionArray'
else: array_type_dir = array_type
import_dir2 = string.replace(import_dir2,'AltExpression/'+array_type_dir+'/'+species+'/','')
import_dir2 = string.replace(import_dir2,'AltExpression/'+array_type_dir+'/','');
dir_list2 = read_directory(import_dir2)
except Exception:
print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: '+species+'\nselected array type: '+array_type+'\nselected directory:'+import_dir2
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
print traceback.format_exc()
badExit()
try: dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
except Exception:
try:
if array_type == 'exon': array_type_dir = 'ExonArray'
elif array_type == 'gene': array_type_dir = 'GeneArray'
elif array_type == 'junction': array_type_dir = 'JunctionArray'
else: array_type_dir = array_type
import_dir = string.replace(import_dir,'AltExpression/'+array_type_dir+'/'+species+'/','')
import_dir = string.replace(import_dir,'AltExpression/'+array_type_dir+'/','');
try: dir_list = read_directory(import_dir)
except Exception:
import_dir = root_dir
dir_list = read_directory(root_dir) ### Occurs when reading in an AltAnalyze filtered file under certain conditions
except Exception:
print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: '+species+'\nselected array type: '+array_type+'\nselected directory:'+import_dir
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out
print traceback.format_exc()
badExit()
dir_list+=dir_list2
### Capture the corresponding files in the residual dir to make sure these files exist for all comparisons - won't if FIRMA was run on some files
if analysis_method == 'FIRMA':
try:
residual_dir = root_dir+'AltExpression/FIRMA/residuals/'+array_type+'/'+species+'/'
residuals_dirlist = read_directory(residual_dir)
except Exception: null=[]
try:
residual_dir = root_dir+'AltExpression/FIRMA/FullDatasets/'+array_type+'/'+species+'/'
residuals_dirlist += read_directory(residual_dir)
except Exception: null=[]
dir_list_verified=[]
for file in residuals_dirlist:
for filename in dir_list:
if file[:-4] in filename: dir_list_verified.append(filename)
dir_list = unique.unique(dir_list_verified)
junction_biotype = 'no'
if array_type == 'RNASeq':
### Check to see if user data includes junctions or just exons
for probeset in exon_db:
if '-' in probeset: junction_biotype = 'yes'; break
if junction_biotype == 'no' and analysis_method != 'splicing-index' and array_type == 'RNASeq':
dir_list=[] ### DON'T RUN ALTANALYZE WHEN JUST ANALYZING EXON DATA
print 'No junction data to summarize... proceeding with exon analysis\n'
elif len(dir_list)==0:
print_out = 'No expression files available in the input directory:\n'+root_dir
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
dir_list = filterAltExpressionFiles(dir_list,altanalyze_files) ### Looks to see if the AltExpression files are for this run or from an older run
for altanalyze_input in dir_list: #loop through each file in the directory to output results
###Import probe-level associations
if 'cel_files' in altanalyze_input:
print_out = 'The AltExpression directory containing the necessary import file(s) is missing. Please verify the correct parameters and input directory were selected. If this error persists, contact us.'
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
if run>0: ### Only re-set these databases after the run when batch analysing multiple files
exon_db={}; filtered_arrayids={};filter_status='no' ###Use this as a means to save memory (import multiple times - only storing different types relevant information)
constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
if altanalyze_input in dir_list2: dataset_dir = import_dir2 +'/'+ altanalyze_input ### Then not a pairwise comparison
else: dataset_dir = import_dir +'/'+ altanalyze_input
dataset_name = altanalyze_input[:-4] + '-'
print "Beginning to process",dataset_name[0:-1]
        ### If the user wants to restrict the analysis to preselected probesets (e.g., limma or FIRMA analysis selected)
global filtered_probeset_db; filtered_probeset_db={}
try: filtered_probeset_db = restrictProbesets(dataset_name)
except Exception: null=[]
if run_from_scratch != 'Annotate External Results':
###Import expression data and stats and filter the expression data based on fold and p-value OR expression threshold
try: conditions,adj_fold_dbase,nonlog_NI_db,dataset_name,gene_expression_diff_db,midas_db,ex_db,si_db = performExpressionAnalysis(dataset_dir,constitutive_probeset_db,exon_db,annotate_db,dataset_name)
except IOError:
#except Exception,exception:
#print exception
print traceback.format_exc()
                print_out = 'The AltAnalyze filtered expression file "'+dataset_name+'" is not properly formatted. Review formatting requirements if this file was created by another application.'
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
else:
conditions = 0; adj_fold_dbase={}; nonlog_NI_db={}; gene_expression_diff_db={}; ex_db={}; si_db={}
defineEmptyExpressionVars(exon_db); adj_fold_dbase = original_fold_dbase
###Run Analysis
summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, number_events_analyzed = splicingAnalysisAlgorithms(nonlog_NI_db,adj_fold_dbase,dataset_name,gene_expression_diff_db,exon_db,ex_db,si_db,dataset_dir)
aspire_output_list.append(aspire_output); aspire_output_gene_list.append(aspire_output_gene)
try: clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db); constitutive_probeset_db=[]
except Exception: null=[]
try: clearObjectsFromMemory(last_exon_region_db);last_exon_region_db=[]
except Exception: null=[]
try: clearObjectsFromMemory(adj_fold_dbase);adj_fold_dbase=[]; clearObjectsFromMemory(nonlog_NI_db);nonlog_NI_db=[]
except Exception: null=[]
try: clearObjectsFromMemory(gene_expression_diff_db);gene_expression_diff_db=[]; clearObjectsFromMemory(midas_db);midas_db=[]
except Exception: null=[]
try: clearObjectsFromMemory(ex_db);ex_db=[]; clearObjectsFromMemory(si_db);si_db=[]
except Exception: null=[]
try: run+=1
except Exception: run = 1
if run>0: ###run = 0 if no filtered expression data present
try: return summary_results_db, aspire_output_gene_list, number_events_analyzed
except Exception:
print_out = 'AltAnalyze was unable to find an expression dataset to analyze in:\n',import_dir,'\nor\n',import_dir2,'\nPlease re-run and select a valid input directory.'
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
else:
try: clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db); constitutive_probeset_db=[]
except Exception: null=[]
try: clearObjectsFromMemory(last_exon_region_db);last_exon_region_db=[]
except Exception: null=[]
return None
def filterAltExpressionFiles(dir_list,current_files):
dir_list2=[]
try:
if len(current_files) == 0: current_files = dir_list ###if no filenames input
for altanalzye_input in dir_list: #loop through each file in the directory to output results
if altanalzye_input in current_files:
dir_list2.append(altanalzye_input)
dir_list = dir_list2
except Exception: dir_list = dir_list
return dir_list
def defineEmptyExpressionVars(exon_db):
global fold_dbase; fold_dbase={}; global original_fold_dbase; global critical_exon_db; critical_exon_db={}
global midas_db; midas_db = {}; global max_replicates; global equal_replicates; max_replicates=0; equal_replicates=0
for probeset in exon_db: fold_dbase[probeset]='',''
original_fold_dbase = fold_dbase
def universalPrintFunction(print_items):
log_report = open(log_file,'a')
for item in print_items:
        if commandLineMode == 'no': ### Command-line mode has its own log file write method (Logger)
log_report.write(item+'\n')
else: print item
log_report.close()
class StatusWindow:
def __init__(self,root,expr_var,alt_var,goelite_var,additional_var,exp_file_location_db):
root.title('AltAnalyze version 2.1.0')
statusVar = StringVar() ### Class method for Tkinter. Description: "Value holder for strings variables."
self.root = root
height = 450; width = 500
if os.name != 'nt': height = 500; width = 600
self.sf = PmwFreeze.ScrolledFrame(root,
labelpos = 'n', label_text = 'Results Status Window',
usehullsize = 1, hull_width = width, hull_height = height)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Output')
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
Label(group.interior(),width=190,height=552,justify=LEFT, bg='black', fg = 'white',anchor=NW,padx = 5,pady = 5, textvariable=statusVar).pack(fill=X,expand=Y)
status = StringVarFile(statusVar,root) ### Likely captures the stdout
sys.stdout = status
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]; fl.setSTDOUT(sys.stdout)
root.after(100, AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, root))
try:
root.protocol("WM_DELETE_WINDOW", self.deleteWindow)
root.mainloop()
except Exception: pass
def deleteWindow(self):
try: self.root.destroy()
except Exception: pass
def quit(self):
try:
self.root.quit()
self.root.destroy()
except Exception: pass
sys.exit()
def exportComparisonSummary(dataset_name,summary_data_dbase,return_type):
log_report = open(log_file,'a')
result_list=[]
for key in summary_data_dbase:
if key != 'QC': ### The value is a list of strings
summary_data_dbase[key] = str(summary_data_dbase[key])
d = 'Dataset name: '+ dataset_name[:-1]; result_list.append(d+'\n')
d = summary_data_dbase['gene_assayed']+':\tAll genes examined'; result_list.append(d)
d = summary_data_dbase['denominator_exp_genes']+':\tExpressed genes examined for AS'; result_list.append(d)
if explicit_data_type == 'exon-only':
d = summary_data_dbase['alt_events']+':\tAlternatively regulated probesets'; result_list.append(d)
d = summary_data_dbase['denominator_exp_events']+':\tExpressed probesets examined'; result_list.append(d)
elif (array_type == 'AltMouse' or array_type == 'junction' or array_type == 'RNASeq') and (explicit_data_type == 'null' or return_type == 'print'):
d = summary_data_dbase['alt_events']+':\tAlternatively regulated junction-pairs'; result_list.append(d)
d = summary_data_dbase['denominator_exp_events']+':\tExpressed junction-pairs examined'; result_list.append(d)
else:
d = summary_data_dbase['alt_events']+':\tAlternatively regulated probesets'; result_list.append(d)
d = summary_data_dbase['denominator_exp_events']+':\tExpressed probesets examined'; result_list.append(d)
d = summary_data_dbase['alt_genes']+':\tAlternatively regulated genes (ARGs)'; result_list.append(d)
d = summary_data_dbase['direct_domain_genes']+':\tARGs - overlapping with domain/motifs'; result_list.append(d)
d = summary_data_dbase['miRNA_gene_hits']+':\tARGs - overlapping with microRNA binding sites'; result_list.append(d)
result_list2=[]
for d in result_list:
if explicit_data_type == 'exon-only': d = string.replace(d,'probeset','exon')
elif array_type == 'RNASeq': d = string.replace(d,'probeset','junction')
result_list2.append(d)
result_list = result_list2
if return_type == 'log':
for d in result_list: log_report.write(d+'\n')
log_report.write('\n')
log_report.close()
return result_list
class SummaryResultsWindow:
def __init__(self,tl,analysis_type,output_dir,dataset_name,output_type,summary_data_dbase):
def showLink(event):
try:
idx = int(event.widget.tag_names(CURRENT)[1]) ### This is just the index provided below (e.g., str(0))
#print [self.LINKS[idx]]
if 'http://' in self.LINKS[idx]:
webbrowser.open(self.LINKS[idx])
elif self.LINKS[idx][-1] == '/':
self.openSuppliedDirectory(self.LINKS[idx])
else:
### Instead of using this option to open a hyperlink (which is what it should do), we can open another Tk window
try: self.viewPNGFile(self.LINKS[idx]) ### ImageTK PNG viewer
except Exception:
try: self.ShowImageMPL(self.LINKS[idx]) ### matplotlib-based display
except Exception:
self.openPNGImage(self.LINKS[idx]) ### Native OS PNG viewer
#self.DisplayPlots(self.LINKS[idx]) ### GIF-based display
except Exception:
null=[] ### anomalous error
self.emergency_exit = False
self.LINKS = []
self.tl = tl
self.tl.title('AltAnalyze version 2.1.0')
self.analysis_type = analysis_type
filename = 'Config/icon.gif'
fn=filepath(filename); img = PhotoImage(file=fn)
can = Canvas(tl); can.pack(side='top'); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
use_scroll = 'yes'
try: runGOElite = run_GOElite
except Exception: runGOElite='decide_later'
if 'QC' in summary_data_dbase:
graphic_links = summary_data_dbase['QC'] ### contains hyperlinks to QC and Clustering plots
if len(graphic_links)==0: del summary_data_dbase['QC'] ### This can be added if an analysis fails
else:
graphic_links = []
label_text_str = 'AltAnalyze Result Summary'; height = 150; width = 500
if analysis_type == 'AS' or 'QC' in summary_data_dbase: height = 330
if analysis_type == 'AS' and 'QC' in summary_data_dbase: height = 330
self.sf = PmwFreeze.ScrolledFrame(tl,
labelpos = 'n', label_text = label_text_str,
usehullsize = 1, hull_width = width, hull_height = height)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
txt=Text(self.frame,bg='gray',width=150, height=80)
txt.pack(expand=True, fill="both")
#txt.insert(END, 'Primary Analysis Finished....\n')
txt.insert(END, 'Results saved to:\n'+output_dir+'\n')
f = Font(family="System", size=12, weight="bold")
txt.tag_config("font", font=f)
i=0
copyDirectoryPDFs(output_dir,AS=analysis_type)
if analysis_type == 'AS':
txt.insert(END, '\n')
result_list = exportComparisonSummary(dataset_name,summary_data_dbase,'print')
for d in result_list: txt.insert(END, d+'\n')
if 'QC' in summary_data_dbase and len(graphic_links)>0:
txt.insert(END, '\nQC and Expression Clustering Plots',"font")
txt.insert(END, '\n\n 1) ')
for (name,file_dir) in graphic_links:
txt.insert(END, name, ('link', str(i)))
if len(graphic_links) > (i+1):
txt.insert(END, '\n %s) ' % str(i+2))
self.LINKS.append(file_dir)
i+=1
txt.insert(END, '\n\nView all primary plots in the folder ')
txt.insert(END, 'DataPlots',('link', str(i))); i+=1
self.LINKS.append(output_dir+'DataPlots/')
else:
url = 'http://code.google.com/p/altanalyze/'
self.LINKS=(url,'')
txt.insert(END, '\nFor more information see the ')
txt.insert(END, "AltAnalyze Online Help", ('link', str(0)))
txt.insert(END, '\n\n')
if runGOElite == 'run-immediately':
txt.insert(END, '\n\nView all pathway enrichment results in the folder ')
txt.insert(END, 'GO-Elite',('link', str(i))); i+=1
self.LINKS.append(output_dir+'GO-Elite/')
if analysis_type == 'AS':
txt.insert(END, '\n\nView all splicing plots in the folder ')
txt.insert(END, 'ExonPlots',('link', str(i))); i+=1
try: self.LINKS.append(output_dir+'ExonPlots/')
except Exception: pass
txt.tag_config('link', foreground="blue", underline = 1)
txt.tag_bind('link', '<Button-1>', showLink)
txt.insert(END, '\n\n')
open_results_folder = Button(tl, text = 'Results Folder', command = self.openDirectory)
open_results_folder.pack(side = 'left', padx = 5, pady = 5);
if analysis_type == 'AS':
#self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
dg_pdf_file = 'Documentation/domain_graph.pdf'; dg_pdf_file = filepath(dg_pdf_file); self.dg_pdf_file = dg_pdf_file
text_button = Button(tl, text='Start DomainGraph in Cytoscape', command=self.SelectCytoscapeTopLevel)
text_button.pack(side = 'right', padx = 5, pady = 5)
self.output_dir = output_dir + "AltResults"
self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingASResults' #http://www.altanalyze.org/what_next_altexon.htm'
whatNext_pdf = 'Documentation/what_next_alt_exon.pdf'; whatNext_pdf = filepath(whatNext_pdf); self.whatNext_pdf = whatNext_pdf
if output_type == 'parent': self.output_dir = output_dir ###Used for fake datasets
else:
if pathway_permutations == 'NA':
self.output_dir = output_dir + "ExpressionOutput"
else: self.output_dir = output_dir
self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingGEResults' #'http://www.altanalyze.org/what_next_expression.htm'
whatNext_pdf = 'Documentation/what_next_GE.pdf'; whatNext_pdf = filepath(whatNext_pdf); self.whatNext_pdf = whatNext_pdf
what_next = Button(tl, text='What Next?', command=self.whatNextlinkout)
what_next.pack(side = 'right', padx = 5, pady = 5)
quit_buttonTL = Button(tl,text='Close View', command=self.close)
quit_buttonTL.pack(side = 'right', padx = 5, pady = 5)
continue_to_next_win = Button(text = 'Continue', command = self.continue_win)
continue_to_next_win.pack(side = 'right', padx = 10, pady = 10)
quit_button = Button(root,text='Quit', command=self.quit)
quit_button.pack(side = 'right', padx = 5, pady = 5)
button_text = 'Help'; help_url = 'http://www.altanalyze.org/help_main.htm'; self.help_url = filepath(help_url)
pdf_help_file = 'Documentation/AltAnalyze-Manual.pdf'; pdf_help_file = filepath(pdf_help_file); self.pdf_help_file = pdf_help_file
help_button = Button(root, text=button_text, command=self.Helplinkout)
help_button.pack(side = 'left', padx = 5, pady = 5)
if self.emergency_exit == False:
self.tl.protocol("WM_DELETE_WINDOW", self.tldeleteWindow)
self.tl.mainloop() ###Needed to show graphic
else:
""" This shouldn't have to be called, but is when the topLevel window isn't closed first
specifically if a PNG file is opened. the sys.exitfunc() should work but doesn't.
work on this more later """
#AltAnalyzeSetup('no')
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
try: self.tl.quit(); self.tl.destroy()
except Exception: None
try: root.quit(); root.destroy()
except Exception: None
UI.getUpdatedParameters(array_type,species,'Process Expression file',output_dir)
sys.exit() ### required when opening PNG files on Windows to continue (not sure why)
#sys.exitfunc()
def tldeleteWindow(self):
try: self.tl.quit(); self.tl.destroy()
except Exception: self.tl.destroy()
def deleteTLWindow(self):
self.emergency_exit = True
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
self.tl.quit()
self.tl.destroy()
sys.exitfunc()
def deleteWindow(self):
self.emergency_exit = True
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
try:
self.tl.quit()
self.tl.destroy()
except Exception: None
sys.exitfunc()
def continue_win(self):
self.emergency_exit = True
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
try: self.tl.quit(); self.tl.destroy()
except Exception: pass
root.quit()
root.destroy()
try: self.tl.grid_forget()
except Exception: None
try: root.grid_forget()
except Exception: None
sys.exitfunc()
def openDirectory(self):
if os.name == 'nt':
try: os.startfile('"'+self.output_dir+'"')
except Exception: os.system('open "'+self.output_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+self.output_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+self.output_dir+'/"')
def openSuppliedDirectory(self,dir):
if os.name == 'nt':
try: os.startfile('"'+self.output_dir+'"')
except Exception: os.system('open "'+dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+dir+'/"')
def DGlinkout(self):
try:
altanalyze_path = filepath('') ### Find AltAnalyze's path
altanalyze_path = altanalyze_path[:-1]
except Exception: null=[]
if os.name == 'nt':
parent_dir = 'C:/Program Files'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
parent_dir = '/Applications'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
parent_dir = '/opt'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape'
try: openCytoscape(altanalyze_path,application_dir,application_name)
except Exception: null=[]
try: self._tls.destroy()
except Exception: None
try: ###Remove this cytoscape as the default
file_location_defaults = UI.importDefaultFileLocations()
del file_location_defaults['CytoscapeDir']
UI.exportDefaultFileLocations(file_location_defaults)
except Exception: null=[]
self.GetHelpTopLevel(self.dg_url,self.dg_pdf_file)
def Helplinkout(self): self.GetHelpTopLevel(self.help_url,self.pdf_help_file)
def whatNextlinkout(self): self.GetHelpTopLevel(self.whatNext_url,self.whatNext_pdf)
def ShowImageMPL(self,file_location):
""" Visualization method using MatPlotLib """
try:
import matplotlib
import matplotlib.pyplot as pylab
except Exception:
#print 'Graphical output mode disabled (requires matplotlib, numpy and scipy)'
None
fig = pylab.figure()
pylab.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.00) ### Fill the plot area left to right
ax = fig.add_subplot(111)
ax.set_xticks([]) ### Hides ticks
ax.set_yticks([])
img= pylab.imread(file_location)
imgplot = pylab.imshow(img)
pylab.show()
def viewPNGFile(self,png_file_dir):
""" View PNG file within a PMW Tkinter frame """
import ImageTk
tlx = Toplevel(); self._tlx = tlx
sf = PmwFreeze.ScrolledFrame(tlx, labelpos = 'n', label_text = '',
usehullsize = 1, hull_width = 800, hull_height = 550)
sf.pack(padx = 0, pady = 0, fill = 'both', expand = 1)
frame = sf.interior()
tlx.title(png_file_dir)
img = ImageTk.PhotoImage(file=png_file_dir)
can = Canvas(frame)
can.pack(fill=BOTH, padx = 0, pady = 0)
w = img.width()
h = img.height()
can.config(width=w, height=h)
can.create_image(2, 2, image=img, anchor=NW)
tlx.mainloop()
def openPNGImage(self,png_file_dir):
if os.name == 'nt':
try: os.startfile('"'+png_file_dir+'"')
except Exception: os.system('open "'+png_file_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+png_file_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+png_file_dir+'"')
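### A minimal consolidated sketch (illustrative only, not used by this module) of the
### cross-platform "open with the default viewer" pattern repeated in the methods above;
### the helper name is hypothetical:
###   def openWithDefaultApplication(path):
###       if os.name == 'nt':
###           try: os.startfile(path)
###           except Exception: os.system('start "" "'+path+'"')
###       elif 'darwin' in sys.platform: os.system('open "'+path+'"')
###       elif 'linux' in sys.platform: os.system('xdg-open "'+path+'"')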
def DisplayPlots(self,file_location):
""" Native Tkinter method - Displays a gif file in a standard TopLevel window (nothing fancy) """
tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('AltAnalyze Plot Visualization')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 520, hull_height = 500)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = file_location)
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
img = PhotoImage(file=filepath(file_location))
can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 20); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
tls.mainloop()
def GetHelpTopLevel(self,url,pdf_file):
try:
config_db = UI.importConfigFile()
ask_for_help = config_db['help'] ### hide_selection_option
except Exception: ask_for_help = 'null'; config_db={}
self.pdf_file = pdf_file; self.url = url
if ask_for_help == 'null':
message = ''; self.message = message; self.online_help = 'Online Documentation'; self.pdf_help = 'Local PDF File'
tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('Please select one of the options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 320, hull_height = 200)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Options')
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
filename = 'Config/icon.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 20); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
text_button2 = Button(group.interior(), text=self.online_help, command=self.openOnlineHelp); text_button2.pack(side = 'top', padx = 5, pady = 5)
try: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
except Exception: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
text_button3 = Button(group.interior(), text='No Thanks', command=self.skipHelp); text_button3.pack(side = 'top', padx = 5, pady = 5)
c = Checkbutton(group.interior(), text = "Apply these settings each time", command=self.setHelpConfig); c.pack(side = 'bottom', padx = 5, pady = 0)
tls.mainloop()
try: tls.destroy()
except Exception: None
else:
file_location_defaults = UI.importDefaultFileLocations()
try:
help_choice = file_location_defaults['HelpChoice'].Location()
if help_choice == 'PDF': self.openPDFHelp()
elif help_choice == 'http': self.openOnlineHelp()
else: self.skip()
except Exception: self.openPDFHelp() ### Open PDF if there's a problem
def SelectCytoscapeTopLevel(self):
try:
config_db = UI.importConfigFile()
cytoscape_type = config_db['cytoscape'] ### hide_selection_option
except Exception: cytoscape_type = 'null'; config_db={}
if cytoscape_type == 'null':
message = ''; self.message = message
tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('Cytoscape Automatic Start Options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 420, hull_height = 200)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Options')
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
filename = 'Config/cyto-logo-smaller.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 5); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
#"""
self.local_cytoscape = 'AltAnalyze Bundled Version'; self.custom_cytoscape = 'Previously Installed Version'
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
l3 = Label(group.interior(), text='Select version of Cytoscape to open:'); l3.pack(side = 'top', pady = 5)
"""
self.local_cytoscape = ' No '; self.custom_cytoscape = ' Yes '
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
l2 = Label(group.interior(), text='Note: Cytoscape can take up to a minute to initialize', fg="red"); l2.pack(side = 'top', padx = 5, pady = 0)
"""
text_button2 = Button(group.interior(), text=self.local_cytoscape, command=self.DGlinkout); text_button2.pack(padx = 5, pady = 5)
try: text_button = Button(group.interior(), text=self.custom_cytoscape, command=self.getPath); text_button.pack(padx = 5, pady = 5)
except Exception: text_button = Button(group.interior(), text=self.custom_cytoscape, command=self.getPath); text_button.pack(padx = 5, pady = 5)
l2 = Label(group.interior(), text='Note: Cytoscape can take up to a minute to initialize', fg="blue"); l2.pack(side = 'bottom', padx = 5, pady = 0)
c = Checkbutton(group.interior(), text = "Apply these settings each time and don't show again", command=self.setCytoscapeConfig); c.pack(side = 'bottom', padx = 5, pady = 0)
#c2 = Checkbutton(group.interior(), text = "Open PDF of DomainGraph help rather than online help", command=self.setCytoscapeConfig); c2.pack(side = 'bottom', padx = 5, pady = 0)
tls.mainloop()
try: tls.destroy()
except Exception: None
else:
file_location_defaults = UI.importDefaultFileLocations()
try: cytoscape_app_dir = file_location_defaults['CytoscapeDir'].Location(); openFile(cytoscape_app_dir)
except Exception:
try: altanalyze_path = filepath(''); altanalyze_path = altanalyze_path[:-1]
except Exception: altanalyze_path=''
application_dir = 'Cytoscape_v'
if os.name == 'nt': application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform: application_name = 'Cytoscape.app'
elif 'linux' in sys.platform: application_name = 'Cytoscape'
try: openCytoscape(altanalyze_path,application_dir,application_name)
except Exception: null=[]
def setCytoscapeConfig(self):
config_db={}; config_db['cytoscape'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def setHelpConfig(self):
config_db={}; config_db['help'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def getPath(self):
file_location_defaults = UI.importDefaultFileLocations()
if os.name == 'nt': parent_dir = 'C:/Program Files'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform: parent_dir = '/Applications'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.app'
elif 'linux' in sys.platform: parent_dir = '/opt'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape'
try:
self.default_dir = file_location_defaults['CytoscapeDir'].Location()
self.default_dir = string.replace(self.default_dir,'//','/')
self.default_dir = string.replace(self.default_dir,'\\','/')
self.default_dir = string.join(string.split(self.default_dir,'/')[:-1],'/')
except Exception:
dir = FindDir(parent_dir,application_dir); dir = filepath(parent_dir+'/'+dir)
self.default_dir = filepath(parent_dir)
try: dirPath = tkFileDialog.askdirectory(parent=self._tls,initialdir=self.default_dir)
except Exception:
self.default_dir = ''
try: dirPath = tkFileDialog.askdirectory(parent=self._tls,initialdir=self.default_dir)
except Exception:
try: dirPath = tkFileDialog.askdirectory(parent=self._tls)
except Exception: dirPath=''
try:
#print [dirPath],application_name
app_dir = dirPath+'/'+application_name
if 'linux' in sys.platform:
try: createCytoscapeDesktop(cytoscape_dir)
except Exception: null=[]
dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
if 'java' not in dir_list: print 'Java not found in /usr/bin/. If not installed,\nplease install and re-try opening Cytoscape'
try:
jar_path = dirPath+'/cytoscape.jar'
main_path = dirPath+'/cytoscape.CyMain'
plugins_path = dirPath+'/plugins'
os.system('java -Dswing.aatext=true -Xss5M -Xmx512M -jar '+jar_path+' '+main_path+' -p '+plugins_path+' &')
print 'Cytoscape jar opened:',jar_path
except Exception:
print 'OS command to open Java failed.'
try: openFile(app_dir2); print 'Cytoscape opened:',app_dir2
except Exception: openFile(app_dir)
else: openFile(app_dir)
try: file_location_defaults['CytoscapeDir'].SetLocation(app_dir)
except Exception:
fl = UI.FileLocationData('', app_dir, 'all')
file_location_defaults['CytoscapeDir'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
except Exception: null=[]
try: self._tls.destroy()
except Exception: None
self.GetHelpTopLevel(self.dg_url,self.dg_pdf_file)
def openOnlineHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:file_location_defaults['HelpChoice'].SetLocation('http')
except Exception:
fl = UI.FileLocationData('', 'http', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
webbrowser.open(self.url)
#except Exception: null=[]
try: self._tls.destroy()
except Exception: None
def skipHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try: file_location_defaults['HelpChoice'].SetLocation('skip')
except Exception:
fl = UI.FileLocationData('', 'skip', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
try: self._tls.destroy()
except Exception: None
def openPDFHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:file_location_defaults['HelpChoice'].SetLocation('PDF')
except Exception:
fl = UI.FileLocationData('', 'PDF', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
if os.name == 'nt':
try: os.startfile('"'+self.pdf_file+'"')
except Exception: os.system('open "'+self.pdf_file+'"')
elif 'darwin' in sys.platform: os.system('open "'+self.pdf_file+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+self.pdf_file+'"')
try: self._tls.destroy()
except Exception: None
def quit(self):
root.quit()
root.destroy()
sys.exit()
def close(self):
#self.tl.quit() #### This was causing multiple errors in 2.0.7 - evaluate more!
self.tl.destroy()
class StringVarFile:
def __init__(self,stringVar,window):
self.__newline = 0; self.__stringvar = stringVar; self.__window = window
def write(self,s):
try:
log_report = open(log_file,'a')
log_report.write(s); log_report.close() ### Variable to record each print statement
new = self.__stringvar.get()
for c in s:
#if c == '\n': self.__newline = 1
if c == '\k': self.__newline = 1 ### This should never match, so output accumulates as a continuous feed rather than replacing a single line
else:
if self.__newline: new = ""; self.__newline = 0
new = new+c
self.set(new)
except Exception: pass
def set(self,s):
try: self.__stringvar.set(s); self.__window.update()
except Exception: pass
def get(self):
try:
return self.__stringvar.get()
except Exception: pass
def flush(self):
pass
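### Usage sketch (mirrors how StatusWindow wires it up above): because StringVarFile
### implements write/flush, assigning an instance to sys.stdout routes print output
### into both the log file and the Tkinter StringVar driving the status label:
###   statusVar = StringVar()
###   sys.stdout = StringVarFile(statusVar, root)
###   print 'Processing chromosome 1...'   ### appears in the GUI and the log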
def timestamp():
import datetime
today = str(datetime.date.today()); today = string.split(today,'-'); today = today[0]+''+today[1]+''+today[2]
time_stamp = string.replace(time.ctime(),':','')
time_stamp = string.replace(time_stamp,'  ',' ') ### collapse the double space time.ctime() uses for single-digit days
time_stamp = string.split(time_stamp,' ') ###Use a time-stamp as the output dir (minus the day)
time_stamp = today+'-'+time_stamp[3]
return time_stamp
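### Example of the returned format (digits illustrative): timestamp() yields
### 'YYYYMMDD-HHMMSS', e.g. '20140321-104500', which is appended to the log file name.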
def callWXPython():
import wx
import AltAnalyzeViewer
app = wx.App(False)
AltAnalyzeViewer.remoteViewer(app)
def AltAnalyzeSetup(skip_intro):
global apt_location; global root_dir;global log_file; global summary_data_db; summary_data_db={}; reload(UI)
global probability_statistic; global commandLineMode; commandLineMode = 'no'
if 'remoteViewer' == skip_intro:
if os.name == 'nt':
callWXPython()
elif os.name == 'ntX':
package_path = filepath('python')
win_package_path = string.replace(package_path,'python','AltAnalyzeViewer.exe')
import subprocess
subprocess.call([win_package_path]);sys.exit()
elif os.name == 'posix':
package_path = filepath('python')
#mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/python')
#os.system(mac_package_path+' RemoteViewer.py');sys.exit()
mac_package_path = string.replace(package_path,'python','AltAnalyzeViewer.app/Contents/MacOS/AltAnalyzeViewer')
import subprocess
subprocess.call([mac_package_path]);sys.exit()
"""
import threading
import wx
app = wx.PySimpleApp()
t = threading.Thread(target=callWXPython)
t.setDaemon(1)
t.start()
s = 1
queue = mlp.Queue()
proc = mlp.Process(target=callWXPython) ### passing sys.stdout unfortunately doesn't work to pass the Tk string
proc.start()
sys.exit()
"""
reload(UI)
expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters(skip_intro,Multi=mlp)
"""except Exception:
if 'SystemExit' not in str(traceback.format_exc()):
expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters('yes')
else: sys.exit()"""
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
apt_location = fl.APTLocation()
root_dir = fl.RootDir()
try: probability_statistic = fl.ProbabilityStatistic()
except Exception: probability_statistic = 'unpaired t-test'
time_stamp = timestamp()
log_file = filepath(root_dir+'AltAnalyze_report-'+time_stamp+'.log')
log_report = open(log_file,'w'); log_report.close()
if use_Tkinter == 'yes' and debug_mode == 'no':
try:
global root; root = Tk()
StatusWindow(root,expr_var, alt_var, goelite_var, additional_var, exp_file_location_db)
root.destroy()
except Exception, exception:
try:
print traceback.format_exc()
badExit()
except Exception: sys.exit()
else: AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db,'')
def badExit():
print "\n...exiting AltAnalyze due to unexpected error"
try:
time_stamp = timestamp()
print_out = "Unknown error encountered during data processing.\nPlease see logfile in:\n\n"+log_file+"\nand report to genmapp@gladstone.ucsf.edu."
try:
if len(log_file)>0:
if commandLineMode == 'no':
if os.name == 'nt':
try: os.startfile('"'+log_file+'"')
except Exception: os.system('open "'+log_file+'"')
elif 'darwin' in sys.platform: os.system('open "'+log_file+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+log_file+'"')
if commandLineMode == 'no':
try: UI.WarningWindow(print_out,'Error Encountered!'); root.destroy()
except Exception: print print_out
except Exception: sys.exit()
except Exception: sys.exit()
sys.exit()
def AltAnalyzeMain(expr_var,alt_var,goelite_var,additional_var,exp_file_location_db,root):
### Hard-coded defaults
w = 'Agilent'; x = 'Affymetrix'; y = 'Ensembl'; z = 'any'; data_source = y; constitutive_source = z; manufacturer = x ### The constitutive source is only really considered if Ensembl; otherwise Affymetrix is used (even if default)
### Get default options for ExpressionBuilder and AltAnalyze
start_time = time.time()
test_goelite = 'no'; test_results_pannel = 'no'
global species; global array_type; global expression_data_format; global use_R; use_R = 'no'
global analysis_method; global p_threshold; global filter_probeset_types
global permute_p_threshold; global perform_permutation_analysis; global export_NI_values
global run_MiDAS; global analyze_functional_attributes; global microRNA_prediction_method
global calculate_normIntensity_p; global pathway_permutations; global avg_all_for_ss; global analyze_all_conditions
global remove_intronic_junctions
global agglomerate_inclusion_probesets; global expression_threshold; global factor_out_expression_changes
global only_include_constitutive_containing_genes; global remove_transcriptional_regulated_genes; global add_exons_to_annotations
global exclude_protein_details; global filter_for_AS; global use_direct_domain_alignments_only; global run_from_scratch
global explicit_data_type; explicit_data_type = 'null'
global altanalyze_files; altanalyze_files = []
species,array_type,manufacturer,constitutive_source,dabg_p,raw_expression_threshold,avg_all_for_ss,expression_data_format,include_raw_data, run_from_scratch, perform_alt_analysis = expr_var
analysis_method,p_threshold,filter_probeset_types,alt_exon_fold_variable,gene_expression_cutoff,remove_intronic_junctions,permute_p_threshold,perform_permutation_analysis, export_NI_values, analyze_all_conditions = alt_var
calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms = additional_var
ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,pathway_permutations,mod,returnPathways = goelite_var
original_remove_intronic_junctions = remove_intronic_junctions
if run_from_scratch == 'Annotate External Results': analysis_method = 'external'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
try: exon_exp_threshold = fl.ExonExpThreshold()
except Exception: exon_exp_threshold = 'NA'
try: gene_exp_threshold = fl.GeneExpThreshold()
except Exception: gene_exp_threshold = 'NA'
try: exon_rpkm_threshold = fl.ExonRPKMThreshold()
except Exception: exon_rpkm_threshold = 'NA'
try: rpkm_threshold = fl.RPKMThreshold() ### Gene-Level
except Exception: rpkm_threshold = 'NA'
fl.setJunctionExpThreshold(raw_expression_threshold) ### For RNA-Seq, this specifically applies to exon-junctions
try: predictGroups = fl.predictGroups()
except Exception: predictGroups = False
try:
if fl.excludeLowExpressionExons(): excludeLowExpExons = 'yes'
else: excludeLowExpExons = 'no'
except Exception: excludeLowExpExons = 'no'
if test_goelite == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
results_dir = filepath(fl.RootDir())
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp)
global perform_element_permutation_analysis; global permutations
perform_element_permutation_analysis = 'yes'; permutations = 2000
analyze_functional_attributes = 'yes' ### Do this by default (shouldn't substantially increase runtime)
if run_from_scratch != 'Annotate External Results' and (array_type != "3'array" and array_type!='RNASeq'):
if run_from_scratch !='Process AltAnalyze filtered':
try: raw_expression_threshold = float(raw_expression_threshold)
except Exception: raw_expression_threshold = 1
if raw_expression_threshold<1:
raw_expression_threshold = 1
print "Expression threshold < 1, forcing to be a minimum of 1."
try: dabg_p = float(dabg_p)
except Exception: dabg_p = 0
if dabg_p == 0 or dabg_p > 1:
print "Invalid dabg-p value threshold entered,(",dabg_p,") setting to default of 0.05"
dabg_p = 0.05
if use_direct_domain_alignments_only == 'direct-alignment': use_direct_domain_alignments_only = 'yes'
if run_from_scratch == 'Process CEL files': expression_data_format = 'log'
print "Beginning AltAnalyze Analysis... Format:", expression_data_format
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print_items=[]; #print [permute_p_threshold]; sys.exit()
print_items.append("AltAnalyze version 2.1.0 - Expression Analysis Parameters Being Used...")
print_items.append('\t'+'database'+': '+unique.getCurrentGeneDatabaseVersion())
print_items.append('\t'+'species'+': '+species)
print_items.append('\t'+'method'+': '+array_type)
print_items.append('\t'+'manufacturer'+': '+manufacturer)
print_items.append('\t'+'probability_statistic'+': '+probability_statistic)
print_items.append('\t'+'constitutive_source'+': '+constitutive_source)
print_items.append('\t'+'dabg_p'+': '+str(dabg_p))
if array_type == 'RNASeq':
print_items.append('\t'+'junction expression threshold'+': '+str(raw_expression_threshold))
print_items.append('\t'+'exon_exp_threshold'+': '+str(exon_exp_threshold))
print_items.append('\t'+'gene_exp_threshold'+': '+str(gene_exp_threshold))
print_items.append('\t'+'exon_rpkm_threshold'+': '+str(exon_rpkm_threshold))
print_items.append('\t'+'gene_rpkm_threshold'+': '+str(rpkm_threshold))
print_items.append('\t'+'exclude low expressing exons for RPKM'+': '+excludeLowExpExons)
else:
print_items.append('\t'+'raw_expression_threshold'+': '+str(raw_expression_threshold))
print_items.append('\t'+'avg_all_for_ss'+': '+avg_all_for_ss)
print_items.append('\t'+'expression_data_format'+': '+expression_data_format)
print_items.append('\t'+'include_raw_data'+': '+include_raw_data)
print_items.append('\t'+'run_from_scratch'+': '+run_from_scratch)
print_items.append('\t'+'perform_alt_analysis'+': '+perform_alt_analysis)
if avg_all_for_ss == 'yes': cs_type = 'core'
else: cs_type = 'constitutive'
print_items.append('\t'+'calculate_gene_expression_using'+': '+cs_type)
print_items.append("Alternative Exon Analysis Parameters Being Used..." )
print_items.append('\t'+'analysis_method'+': '+analysis_method)
print_items.append('\t'+'p_threshold'+': '+str(p_threshold))
print_items.append('\t'+'filter_data_types'+': '+filter_probeset_types)
print_items.append('\t'+'alt_exon_fold_variable'+': '+str(alt_exon_fold_variable))
print_items.append('\t'+'gene_expression_cutoff'+': '+str(gene_expression_cutoff))
print_items.append('\t'+'remove_intronic_junctions'+': '+remove_intronic_junctions)
print_items.append('\t'+'avg_all_for_ss'+': '+avg_all_for_ss)
print_items.append('\t'+'permute_p_threshold'+': '+str(permute_p_threshold))
print_items.append('\t'+'perform_permutation_analysis'+': '+perform_permutation_analysis)
print_items.append('\t'+'export_NI_values'+': '+export_NI_values)
print_items.append('\t'+'run_MiDAS'+': '+run_MiDAS)
print_items.append('\t'+'use_direct_domain_alignments_only'+': '+use_direct_domain_alignments_only)
print_items.append('\t'+'microRNA_prediction_method'+': '+microRNA_prediction_method)
print_items.append('\t'+'analyze_all_conditions'+': '+analyze_all_conditions)
print_items.append('\t'+'filter_for_AS'+': '+filter_for_AS)
if pathway_permutations == 'NA': run_GOElite = 'decide_later'
else: run_GOElite = 'run-immediately'
print_items.append('\t'+'run_GOElite'+': '+ run_GOElite)
universalPrintFunction(print_items)
if commandLineMode == 'yes': print 'Running command line mode:',commandLineMode
summary_data_db['gene_assayed'] = 0
summary_data_db['denominator_exp_genes']=0
summary_data_db['alt_events'] = 0
summary_data_db['denominator_exp_events'] = 0
summary_data_db['alt_genes'] = 0
summary_data_db['direct_domain_genes'] = 0
summary_data_db['miRNA_gene_denom'] = 0
summary_data_db['miRNA_gene_hits'] = 0
if test_results_pannel == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
graphic_links = []
graphic_links.append(['test','Config/AltAnalyze_structure-RNASeq.jpg'])
summary_data_db['QC']=graphic_links
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test'; results_dir=''
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_data_db)
root.destroy(); sys.exit()
global export_go_annotations; global aspire_output_list; global aspire_output_gene_list
global filter_probesets_by; global global_addition_factor; global onlyAnalyzeJunctions
global log_fold_cutoff; global aspire_cutoff; global annotation_system; global alt_exon_logfold_cutoff
"""dabg_p = 0.75; data_type = 'expression' ###used for expression analysis when dealing with AltMouse arrays
a = "3'array"; b = "exon"; c = "AltMouse"; e = "custom"; array_type = c
l = 'log'; n = 'non-log'; expression_data_format = l
hs = 'Hs'; mm = 'Mm'; dr = 'Dr'; rn = 'Rn'; species = mm
include_raw_data = 'yes'; expression_threshold = 70 ### Based on suggestion from BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196, for hu-exon 1.0 st array
avg_all_for_ss = 'no' ###Default is 'no' since we don't want all probes averaged for the exon arrays"""
###### Run ExpressionBuilder ######
"""ExpressionBuilder is used to:
(1) extract out gene expression values, provide gene annotations, and calculate summary gene statistics
(2) filter probesets based on DABG p-values and export to pair-wise comparison files
(3) build array annotations files matched to gene structure features (e.g. exons, introns) using chromosomal coordinates
options 1-2 are executed in remoteExpressionBuilder and option 3 is by running ExonArrayEnsembl rules"""
try:
additional_algorithm = additional_algorithms.Algorithm()
additional_score = additional_algorithms.Score()
except Exception: additional_algorithm = 'null'; additional_score = 'null'
if analysis_method == 'FIRMA': analyze_metaprobesets = 'yes'
elif additional_algorithm == 'FIRMA': analyze_metaprobesets = 'yes'
else: analyze_metaprobesets = 'no'
### Check to see if this is a real or FAKE (used for demonstration purposes) dataset
if run_from_scratch == 'Process CEL files' or 'Feature Extraction' in run_from_scratch:
for dataset in exp_file_location_db:
if run_from_scratch == 'Process CEL files':
fl = exp_file_location_db[dataset]
pgf_file=fl.InputCDFFile()
results_dir = filepath(fl.RootDir())
if '_demo' in pgf_file: ### Thus we are running demo CEL files and want to quit immediately
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'AS',results_dir,dataset,'parent',summary_data_db)
except Exception: null=[]
skip_intro = 'yes'
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
reload(UI)
UI.getUpdatedParameters(array_type,species,run_from_scratch,results_dir)
try: AltAnalyzeSetup('no')
except Exception: sys.exit()
if 'CEL files' in run_from_scratch:
import APT
try:
try:
APT.probesetSummarize(exp_file_location_db,analyze_metaprobesets,filter_probeset_types,species,root)
if analyze_metaprobesets == 'yes':
analyze_metaprobesets = 'no' ### Re-run the APT analysis to obtain probeset rather than gene-level results (only the residuals are needed from a metaprobeset run)
APT.probesetSummarize(exp_file_location_db,analyze_metaprobesets,filter_probeset_types,species,root)
except Exception:
import platform
print "Trying to change APT binary access privileges"
for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
fl = exp_file_location_db[dataset]; apt_dir =fl.APTLocation()
if '/bin' in apt_dir: apt_file = apt_dir +'/apt-probeset-summarize' ### if the user selects an APT directory
elif os.name == 'nt': apt_file = apt_dir + '/PC/'+platform.architecture()[0]+'/apt-probeset-summarize.exe'
elif 'darwin' in sys.platform: apt_file = apt_dir + '/Mac/apt-probeset-summarize'
elif 'linux' in sys.platform:
if '32bit' in platform.architecture(): apt_file = apt_dir + '/Linux/32bit/apt-probeset-summarize'
elif '64bit' in platform.architecture(): apt_file = apt_dir + '/Linux/64bit/apt-probeset-summarize'
apt_file = filepath(apt_file)
os.chmod(apt_file,0777)
midas_dir = string.replace(apt_file,'apt-probeset-summarize','apt-midas')
os.chmod(midas_dir,0777)
APT.probesetSummarize(exp_file_location_db,analysis_method,filter_probeset_types,species,root)
except Exception:
print_out = 'AltAnalyze encountered an unexpected error while running Affymetrix\n'
print_out += 'Power Tools (APT). Additional information may be found in the directory\n'
print_out += '"ExpressionInput/APT" in the output directory. You may also encounter issues\n'
print_out += 'if you are logged into an account with restricted privileges.\n\n'
print_out += 'If this issue can not be resolved, contact AltAnalyze help or run RMA outside\n'
print_out += 'of AltAnalyze and import the results using the analysis option "expression file".\n'
print traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
elif 'Feature Extraction' in run_from_scratch:
import ProcessAgilentArrays
try: ProcessAgilentArrays.agilentSummarize(exp_file_location_db)
except Exception:
print_out = 'Agilent array import and processing failed... see error log for details...'
print traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
reload(ProcessAgilentArrays)
if run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'buildExonExportFiles':
import RNASeq; reload(RNASeq); import RNASeq
for dataset in exp_file_location_db: fl = exp_file_location_db[dataset]
### The below function aligns splice-junction coordinates to Ensembl exons from BED Files and
### exports AltAnalyze specific databases that are unique to this dataset to the output directory
try: fastq_folder = fl.RunKallisto()
except Exception: print traceback.format_exc()
if len(fastq_folder)>0:
try:
RNASeq.runKallisto(species,dataset,root_dir,fastq_folder,returnSampleNames=False)
biotypes = 'ran'
except Exception: biotypes='failed'
else:
analyzeBAMs = False; bedFilesPresent = False
dir_list = unique.read_directory(fl.BEDFileDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs=True
if '.bed' in string.lower(file):
bedFilesPresent=True
if analyzeBAMs and bedFilesPresent==False:
import multiBAMtoBED
bam_dir = fl.BEDFileDir()
refExonCoordinateFile = filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt')
outputExonCoordinateRefBEDfile = bam_dir+'/BedRef/'+species+'_'+string.replace(dataset,'exp.','')
analysisType = ['exon','junction','reference']
#analysisType = ['junction']
multiBAMtoBED.parallelBAMProcessing(bam_dir,refExonCoordinateFile,outputExonCoordinateRefBEDfile,analysisType=analysisType,useMultiProcessing=fl.multiThreading(),MLP=mlp,root=root)
biotypes = RNASeq.alignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset,Multi=mlp)
if biotypes == 'failed':
print_out = 'No valid chromosomal positions in the input BED or BioScope files. Exiting AltAnalyze.'
if len(fastq_folder)>0:
if 'FTP' in traceback.format_exc():
print_out = 'AltAnalyze was unable to retrieve a transcript fasta sequence file from the Ensembl website. '
print_out += 'Ensure you are connected to the internet and that the website http://ensembl.org is live.'
else:
print_out = 'An unexplained error was encountered with Kallisto analysis:\n'
print_out += traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
reload(RNASeq)
if root_dir in biotypes:
print_out = 'Exon-level BED coordinate predictions exported to:\n'+biotypes
print_out+= '\n\nAfter obtaining exon expression estimates, rename exon BED files to\n'
print_out+= 'match the junction name (e.g., Sample1__exon.bed and Sample1__junction.bed)\n'
print_out+= 'and re-run AltAnalyze (see tutorials at http://altanalyze.org for help).'
UI.InfoWindow(print_out,'Export Complete')
try: root.destroy(); sys.exit()
except Exception: sys.exit()
if predictGroups == True:
expFile = fl.ExpFile()
if array_type == 'RNASeq':
exp_threshold=100; rpkm_threshold=10
else:
exp_threshold=200; rpkm_threshold=8
RNASeq.singleCellRNASeqWorkflow(species, array_type, expFile, mlp, exp_threshold=exp_threshold, rpkm_threshold=rpkm_threshold)
goelite_run = False
if run_from_scratch == 'Process Expression file' or run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or 'Feature Extraction' in run_from_scratch:
if (fl.NormMatrix()=='quantile' or fl.NormMatrix()=='group') and 'Feature Extraction' not in run_from_scratch:
import NormalizeDataset
try: NormalizeDataset.normalizeDataset(fl.ExpFile(),normalization=fl.NormMatrix(),platform=array_type)
except Exception: print "Normalization failed for unknown reasons..."
#"""
status = ExpressionBuilder.remoteExpressionBuilder(species,array_type,
dabg_p,raw_expression_threshold,avg_all_for_ss,expression_data_format,
manufacturer,constitutive_source,data_source,include_raw_data,
perform_alt_analysis,ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,
exp_file_location_db,root)
reload(ExpressionBuilder) ### Clears Memory
#"""
graphics=[]
if fl.MarkerFinder() == 'yes':
### Identify putative condition-specific marker genes
import markerFinder
fl.setOutputDir(root_dir) ### This needs to be set here
exp_file = fl.ExpFile()
if array_type != "3'array": exp_file = string.replace(exp_file,'.txt','-steady-state.txt')
markerFinder_inputs = [exp_file,fl.DatasetFile()] ### Output a replicate and non-replicate version
markerFinder_inputs = [exp_file] ### Only considers the replicate and not mean analysis (recommended)
for input_exp_file in markerFinder_inputs:
### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,array_type) ### Either way, make an average annotated file from the DATASET file
except Exception:
print "Unknown MarkerFinder failure (possible filename issue or data incompatibility)..."
print traceback.format_exc()
continue
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'DATASET','AVERAGE')
else:
group_exp_file = (input_exp_file,output_dir) ### still analyze the primary sample
compendiumType = 'protein_coding'
if expression_data_format == 'non-log': logTransform = True
else: logTransform = False
try: markerFinder.analyzeData(group_exp_file,species,array_type,compendiumType,AdditionalParameters=fl,logTransform=logTransform)
except Exception: None
### Generate heatmaps (unclustered - order by markerFinder)
try: graphics = markerFinder.generateMarkerHeatMaps(fl,array_type,graphics=graphics,Species=species)
except Exception: print traceback.format_exc()
remove_intronic_junctions = original_remove_intronic_junctions ### This var gets reset when running FilterDABG
try:
summary_data_db['QC'] = fl.GraphicLinks()+graphics ### provides links for displaying QC and clustering plots
except Exception:
null=[] ### Visualization support through matplotlib either not present or visualization options excluded
#print '!!!!!finished expression builder'
#returnLargeGlobalVars()
expression_data_format = 'log' ### This variable is set from non-log in FilterDABG when present (version 1.16)
try:
parent_dir = fl.RootDir()+'/GO-Elite/regulated/'
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir+'/'+file
inputType = 'IDs'
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets']
output_dir = parent_dir
degrees = 'direct'
input_exp_file = input_file_dir
gsp = UI.GeneSelectionParameters(species,array_type,manufacturer)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setOntologyID('')
gsp.setIncludeExpIDs(True)
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,'')
except Exception:
print traceback.format_exc()
if status == 'stop':
### See if the array and species are compatible with GO-Elite analysis
system_codes = UI.getSystemInfo()
go_elite_analysis_supported = 'yes'
species_names = UI.getSpeciesInfo()
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]; results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
try:
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
input_dir = results_dir+'GO-Elite/'+elite_dir
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
try: input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception: input_files=[]
if len(input_files)>0:
try: GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp); goelite_run = True
except Exception,e:
print e
print "GO-Elite analysis failed"
try: GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception: print 'Input GO-Elite files could NOT be moved.'
try: GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception: print 'Input GO-Elite files could NOT be moved.'
except Exception: pass
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. Gene expression\nsummary exported to "ExpressionOutput".'
try:
if use_Tkinter == 'yes':
print "Analysis Complete\n"; UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_data_db)
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
if go_elite_analysis_supported == 'yes':
UI.getUpdatedParameters(array_type,species,run_from_scratch,file_dirs)
try: AltAnalyzeSetup('no')
except Exception:
print traceback.format_exc()
sys.exit()
else: print '\n'+print_out; sys.exit()
except Exception:
#print 'Failed to report status through GUI.'
sys.exit()
else: altanalyze_files = status[1] ### These files are the comparison files to analyze
elif run_from_scratch == 'update DBs':
null=[] ###Add link to new module here (possibly)
#updateDBs(species,array_type)
sys.exit()
if perform_alt_analysis != 'expression': ###Thus perform_alt_analysis = 'both' or 'alt' (default when skipping expression summary step)
###### Run AltAnalyze ######
global dataset_name; global summary_results_db; global summary_results_db2
summary_results_db={}; summary_results_db2={}; aspire_output_list=[]; aspire_output_gene_list=[]
onlyAnalyzeJunctions = 'no'; agglomerate_inclusion_probesets = 'no'; filter_probesets_by = 'NA'
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if filter_probeset_types == 'junctions-only': onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'combined-junctions': agglomerate_inclusion_probesets = 'yes'; onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'exons-only': analysis_method = 'splicing-index'; filter_probesets_by = 'exon'
if filter_probeset_types == 'combined-junctions' and array_type == 'junction' or array_type == 'RNASeq': filter_probesets_by = 'all'
else: filter_probesets_by = filter_probeset_types
c = 'Ensembl'; d = 'Entrez Gene'
annotation_system = c
expression_threshold = 0 ###This is different than the raw_expression_threshold (probably shouldn't filter so set to 0)
if analysis_method == 'linearregres-rlm': analysis_method = 'linearregres';use_R = 'yes'
if gene_expression_cutoff<1:
gene_expression_cutoff = 2 ### A number less than one is invalid
print "WARNING!!!! Invalid gene expression fold cutoff entered,\nusing the default value of 2, must be greater than 1."
log_fold_cutoff = math.log(float(gene_expression_cutoff),2)
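### Worked example (values illustrative): a gene_expression_cutoff of 2 gives
### log_fold_cutoff = log2(2) = 1.0, i.e. at least a one log2-unit change in gene expression.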
if analysis_method != 'ASPIRE' and analysis_method != 'none':
if p_threshold <= 0 or p_threshold >1:
p_threshold = 0.05 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon p-value threshold entered,\nusing the default value of 0.05."
if alt_exon_fold_variable<1:
alt_exon_fold_variable = 1 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon fold cutoff entered,\nusing the default value of 2, must be greater than 1."
try: alt_exon_logfold_cutoff = math.log(float(alt_exon_fold_variable),2)
except Exception: alt_exon_logfold_cutoff = 1
else: alt_exon_logfold_cutoff = float(alt_exon_fold_variable)
global_addition_factor = 0
export_junction_comparisons = 'no' ### No longer accessed in this module - only in update mode through a different module
factor_out_expression_changes = 'yes' ### Use 'no' if data is normalized already or no expression normalization for ASPIRE desired
only_include_constitutive_containing_genes = 'yes'
remove_transcriptional_regulated_genes = 'yes'
add_exons_to_annotations = 'no'
exclude_protein_details = 'no'
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method: annotation_system = d
if 'linear' in analysis_method: analysis_method = 'linearregres'
if 'aspire' in analysis_method: analysis_method = 'ASPIRE'
if array_type == 'AltMouse': species = 'Mm'
#if export_NI_values == 'yes': remove_transcriptional_regulated_genes = 'no'
###Saves run-time while testing the software (global variable stored)
#import_dir = '/AltDatabase/affymetrix/'+species
#dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
### Get Ensembl-GO and pathway annotations from GO-Elite files
universalPrintFunction(["Importing GO-Elite pathway/GO annotations"])
global go_annotations; go_annotations={}
import BuildAffymetrixAssociations
go_annotations = BuildAffymetrixAssociations.getEnsemblAnnotationsFromGOElite(species)
global probeset_annotations_file
if array_type == 'RNASeq': probeset_annotations_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
elif array_type == 'AltMouse': probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+'MASTER-probeset-transcript.txt'
else: probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
#"""
if analysis_method != 'none':
analysis_summary = RunAltAnalyze() ### Only run if analysis methods is specified (only available for RNA-Seq and junction analyses)
else: analysis_summary = None
if analysis_summary != None:
summary_results_db, aspire_output_gene_list, number_events_analyzed = analysis_summary
summary_data_db2 = copy.deepcopy(summary_data_db)
for i in summary_data_db2: del summary_data_db[i] ### If we reset the variable it violates its global declaration... do this instead
#universalPrintFunction(['Alternative Exon Results for Junction Comparisons:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,array_type,number_events_analyzed,root_dir)
else:
### Occurs for RNASeq when no junctions are present
summary_data_db2={}
if array_type == 'junction' or array_type == 'RNASeq':
### Reanalyze junction array data separately for individual probesets rather than reciprocal junctions
if array_type == 'junction': explicit_data_type = 'exon'
elif array_type == 'RNASeq': explicit_data_type = 'junction'
else: report_single_probeset_results = 'no'
### Obtain exon analysis defaults
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('exon',species)
analysis_method, null, filter_probeset_types, null, null, alt_exon_fold_variable, null, null, null, null, null, null, null, calculate_normIntensity_p, null = alt_exon_defaults
filter_probesets_by = filter_probeset_types
if additional_algorithm == 'splicing-index' or additional_algorithm == 'FIRMA':
analysis_method = additional_algorithm
#print [analysis_method], [filter_probeset_types], [p_threshold], [alt_exon_fold_variable]
try: alt_exon_logfold_cutoff = math.log(float(additional_score),2)
except Exception: alt_exon_logfold_cutoff = 1
agglomerate_inclusion_probesets = 'no'
try:
summary_results_db, aspire_output_gene_list, number_events_analyzed = RunAltAnalyze()
exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,'exon',number_events_analyzed,root_dir)
if len(summary_data_db2)==0: summary_data_db2 = summary_data_db; explicit_data_type = 'exon-only'
#universalPrintFunction(['Alternative Exon Results for Individual Probeset Analyses:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
except Exception:
print traceback.format_exc()
None
#"""
### Perform dPSI Analysis
try:
if 'counts.' in fl.CountsFile(): pass
else:
dir_list = read_directory(fl.RootDir()+'ExpressionInput')
for file in dir_list:
if 'exp.' in file and 'steady-state' not in file:
fl.setExpFile(fl.RootDir()+'ExpressionInput/'+file)
#print [fl.RootDir()+'ExpressionInput/'+file]
except Exception:
search_dir = fl.RootDir()+'/ExpressionInput'
files = unique.read_directory(fl.RootDir()+'/ExpressionInput')
for file in files:
if 'exp.' in file and 'steady-state.txt' not in file:
fl.setExpFile(search_dir+'/'+file)
try:
#"""
try:
graphic_links2,cluster_input_file=ExpressionBuilder.unbiasedComparisonSpliceProfiles(fl.RootDir(),
species,array_type,expFile=fl.CountsFile(),min_events=0,med_events=1)
except Exception: pass
#"""
inputpsi = fl.RootDir()+'AltResults/AlternativeOutput/'+species+'_'+array_type+'_top_alt_junctions-PSI-clust.txt'
### Calculate ANOVA p-value stats based on groups
if array_type !='gene' and array_type != 'exon':
matrix,compared_groups,original_data = statistics.matrixImport(inputpsi)
matrix_pvalues=statistics.runANOVA(inputpsi,matrix,compared_groups)
anovaFilteredDir = statistics.returnANOVAFiltered(inputpsi,original_data,matrix_pvalues)
graphic_link1 = ExpressionBuilder.exportHeatmap(anovaFilteredDir)
try: summary_data_db2['QC']+=graphic_link1
except Exception: summary_data_db2['QC']=graphic_link1
except Exception: print traceback.format_exc()
import RNASeq
try:
graphic_link = RNASeq.compareExonAndJunctionResults(species,array_type,summary_results_db,root_dir)
try: summary_data_db2['QC']+=graphic_link
except Exception: summary_data_db2['QC']=graphic_link
except Exception:
print traceback.format_exc()
#"""
    ### Export the top-ranked spliced genes (up to 50 per input file)
try:
altresult_dir = fl.RootDir()+'/AltResults/'
splicing_results_root = altresult_dir+'/Clustering/'
dir_list = read_directory(splicing_results_root)
gene_string=''
altanalyze_results_folder = altresult_dir+'/RawSpliceData/'+species
### Lookup the raw expression dir
expression_results_folder = string.replace(altresult_dir,'AltResults','ExpressionInput')
expression_dir = UI.getValidExpFile(expression_results_folder)
show_introns=False
try: altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception,e:
print traceback.format_exc()
analysisType='plot'
for file in dir_list:
if 'AltExonConfirmed' in file:
gene_dir = splicing_results_root+'/'+file
genes = UI.importGeneList(gene_dir,limit=50) ### list of gene IDs or symbols
gene_string = gene_string+','+genes
print 'Imported genes from',file,'\n'
analysisType='plot'
for file in dir_list:
if 'Combined-junction-exon-evidence' in file and 'top' not in file:
gene_dir = splicing_results_root+'/'+file
try: isoform_dir = UI.exportJunctionList(gene_dir,limit=50) ### list of gene IDs or symbols
except Exception: print traceback.format_exc()
UI.altExonViewer(species,array_type,expression_dir, gene_string, show_introns, analysisType, None); print 'completed'
UI.altExonViewer(species,array_type,altresult_dir, gene_string, show_introns, analysisType, None); print 'completed'
except Exception:
print traceback.format_exc()
if array_type != 'exon' and array_type != 'gene':
### SashimiPlot Visualization
try:
top_PSI_junction = inputpsi[:-4]+'-ANOVA.txt'
isoform_dir2 = UI.exportJunctionList(top_PSI_junction,limit=50) ### list of gene IDs or symbols
except Exception:
print traceback.format_exc()
try:
analyzeBAMs = False
dir_list = unique.read_directory(fl.RootDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs=True
if analyzeBAMs:
### Create sashimi plot index
import SashimiIndex
SashimiIndex.remoteIndexing(species,fl)
import SashimiPlot
print 'Exporting Sashimi Plots for the top-predicted splicing events... be patient'
try: SashimiPlot.remoteSashimiPlot(species,fl,fl.RootDir(),isoform_dir) ### assuming the bam files are in the root-dir
except Exception: pass # print traceback.format_exc()
print 'completed'
try: SashimiPlot.remoteSashimiPlot(species,fl,fl.RootDir(),isoform_dir2) ### assuming the bam files are in the root-dir
except Exception: pass #print traceback.format_exc()
print 'completed'
### Try again, in case the symbol conversion failed
SashimiPlot.justConvertFilenames(species,fl.RootDir()+'/SashimiPlots')
else:
print 'No BAM files present in the root directory... skipping SashimiPlot analysis...'
except Exception:
print traceback.format_exc()
try:
clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db)
clearObjectsFromMemory(go_annotations); clearObjectsFromMemory(original_microRNA_z_score_data)
clearObjectsFromMemory(last_exon_region_db)
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
except Exception: null=[]
#print '!!!!!finished'
#returnLargeGlobalVars()
end_time = time.time(); time_diff = int(end_time-start_time)
universalPrintFunction(["Analyses finished in %d seconds" % time_diff])
#universalPrintFunction(["Hit Enter/Return to exit AltAnalyze"])
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]; results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
goelite_run = False
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
input_dir = results_dir+'GO-Elite/'+elite_dir
try: input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception: input_files = []
if len(input_files)>0:
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
try: GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp); goelite_run = True
except Exception,e:
print e
print "GO-Elite analysis failed"
try: GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception: print 'Input GO-Elite files could NOT be moved.'
try: GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception: print 'Input GO-Elite files could NOT be moved.'
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
if root !='' and root !=None:
print "Analysis Complete\n";
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'AS',results_dir,dataset_name,'specific',summary_data_db2)
except Exception:
print traceback.format_exc()
pass #print 'Failed to open GUI.'
skip_intro = 'yes'
if root !='' and root !=None:
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
try: UI.getUpdatedParameters(array_type,species,run_from_scratch,file_dirs)
except Exception: pass
try: AltAnalyzeSetup('no')
except Exception: sys.exit()
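### Export summary and gene-level comparison tables for the alternative exon results via ResultsExport_module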
def exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,array_type,number_events_analyzed,root_dir):
try:
ResultsExport_module.outputSummaryResults(summary_results_db,'',analysis_method,root_dir)
#ResultsExport_module.outputSummaryResults(summary_results_db2,'-uniprot_attributes',analysis_method)
ResultsExport_module.compareAltAnalyzeResults(aspire_output_list,annotate_db,number_events_analyzed,'no',analysis_method,array_type,root_dir)
ResultsExport_module.compareAltAnalyzeResults(aspire_output_gene_list,annotate_db,'','yes',analysis_method,array_type,root_dir)
except UnboundLocalError: print "...No results to summarize" ###Occurs if there is a problem parsing these files
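### Check whether any probeset in the Affymetrix CSV annotation file is also present in the GO-Elite gene-to-probeset associations (returns 'yes' or 'no')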
def checkGOEliteProbesets(fn,species):
### Get all probesets in GO-Elite files
mod_source = 'Ensembl'+'-'+'Affymetrix'
import gene_associations
try: ensembl_to_probeset_id = gene_associations.getGeneToUid(species,mod_source)
except Exception: ensembl_to_probeset_id={}
mod_source = 'EntrezGene'+'-'+'Affymetrix'
try: entrez_to_probeset_id = gene_associations.getGeneToUid(species,mod_source)
except Exception: entrez_to_probeset_id={}
probeset_db={}
for gene in ensembl_to_probeset_id:
for probeset in ensembl_to_probeset_id[gene]: probeset_db[probeset]=[]
for gene in entrez_to_probeset_id:
for probeset in entrez_to_probeset_id[gene]: probeset_db[probeset]=[]
###Import an Affymetrix array annotation file (from http://www.affymetrix.com) and parse out annotations
csv_probesets = {}; x=0; y=0
fn=filepath(fn); status = 'no'
for line in open(fn,'r').readlines():
probeset_data = string.replace(line,'\n','') #remove endline
probeset_data = string.replace(probeset_data,'---','')
affy_data = string.split(probeset_data[1:-1],'","')
if x==0 and line[0]!='#':
x=1; affy_headers = affy_data
            y = 0
            while y < len(affy_headers):
                if 'Probe Set ID' in affy_headers[y] or 'probeset_id' in affy_headers[y]: ps = y
                y+=1
elif x == 1:
try: probeset = affy_data[ps]; csv_probesets[probeset]=[]
except Exception: null=[]
for probeset in csv_probesets:
if probeset in probeset_db: status = 'yes';break
return status
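### Simple container for species metadata: species code, full name, compatible ID systems and taxonomy ID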
class SpeciesData:
def __init__(self, abrev, species, systems, taxid):
self._abrev = abrev; self._species = species; self._systems = systems; self._taxid = taxid
def SpeciesCode(self): return self._abrev
def SpeciesName(self): return self._species
def Systems(self): return self._systems
def TaxID(self): return self._taxid
    def __repr__(self): return self.SpeciesCode()+'|'+self.SpeciesName()
def getSpeciesInfo():
### Used by AltAnalyze
    species_codes = UI.importSpeciesInfo(); species_names={}
for species_full in species_codes:
sc = species_codes[species_full]; abrev = sc.SpeciesCode()
species_names[abrev] = species_full
return species_codes,species_names
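### Parse Config/goelite_species.txt into SpeciesData objects keyed by species name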
def importGOEliteSpeciesInfo():
filename = 'Config/goelite_species.txt'; x=0
fn=filepath(filename); species_codes={}
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
abrev,species,taxid,compatible_mods = string.split(data,'\t')
if x==0: x=1
else:
compatible_mods = string.split(compatible_mods,'|')
sd = SpeciesData(abrev,species,compatible_mods,taxid)
species_codes[species] = sd
return species_codes
def exportGOEliteSpeciesInfo(species_codes):
fn=filepath('Config/goelite_species.txt'); data = open(fn,'w'); x=0
header = string.join(['species_code','species_name','tax_id','compatible_algorithms'],'\t')+'\n'
data.write(header)
for species in species_codes:
if 'other' not in species and 'all-' not in species:
sd = species_codes[species]
mods = string.join(sd.Systems(),'|')
values = [sd.SpeciesCode(),sd.SpeciesName(),sd.TaxID(),mods]
values = string.join(values,'\t')+'\n'
data.write(values)
data.close()
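### Return the current date as a zero-padded YYYYMMDD string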
def TimeStamp():
time_stamp = time.localtime()
year = str(time_stamp[0]); month = str(time_stamp[1]); day = str(time_stamp[2])
if len(month)<2: month = '0'+month
if len(day)<2: day = '0'+day
return year+month+day
def verifyFile(filename):
status = 'not found'
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines(): status = 'found';break
except Exception: status = 'not found'
return status
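### Count the lines in a file, stopping after 10; returns 0 if the file cannot be read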
def verifyFileLength(filename):
count = 0
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
count+=1
if count>9: break
except Exception: null=[]
return count
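### A groups file is considered correctly formatted if at least one line contains exactly three tab-delimited columns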
def verifyGroupFileFormat(filename):
correct_format = False
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if len(string.split(data,'\t'))==3:
correct_format = True
break
except Exception: correct_format = False
return correct_format
def displayHelp():
fn=filepath('Documentation/commandline.txt')
print '\n################################################\nAltAnalyze Command-Line Help'
for line in open(fn,'rU').readlines():
print cleanUpLine(line)
print '\n################################################ - END HELP'
sys.exit()
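### Return, as a single-item list, the path of the first file in the directory whose name contains var, relative to the current gene database version folder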
def searchDirectory(directory,var):
directory = unique.filepath(directory)
files = unique.read_directory(directory)
version = unique.getCurrentGeneDatabaseVersion()
for file in files:
if var in file:
location = string.split(directory+'/'+file,version)[1][1:]
return [location]
break
###### Command Line Functions (AKA Headless Mode) ######
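### commandLineRun() parses the long-form getopt flags below, runs any standalone analyses requested (e.g., --image, --accessoryAnalysis, --runICGS, --update) and otherwise proceeds to the database-dependent AltAnalyze workflows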
def commandLineRun():
print 'Running commandline options'
import getopt
#/hd3/home/nsalomonis/normalization/mir1 - boxer
#python AltAnalyze.py --species Mm --arraytype "3'array" --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Hs --arraytype "3'array" --FEdir "C:/FEfiles" --output "C:/FEfiles" --channel_to_extract "green/red ratio" --expname cancer --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column
#open ./AltAnalyze.app --celdir "/Users/nsalomonis/Desktop" --output "/Users/nsalomonis/Desktop" --expname test
#python AltAnalyze.py --species Mm --arraytype "3'array" --expdir "C:/CEL/ExpressionInput/exp.miR1_column.txt" --output "C:/CEL" --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Mm --platform RNASeq --bedDir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --groupdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/groups.test.txt" --compdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/comps.test.txt" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --expname "test"
#python AltAnalyze.py --species Mm --platform RNASeq --filterdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles"
#python AltAnalyze.py --expdir "/Users/nsalomonis/Desktop/Nathan/ExpressionInput/exp.test.txt" --exonMapFile "/Users/nsalomonis/Desktop/Nathan/hgu133_probe.txt" --species Hs --platform "3'array" --output "/Users/nsalomonis/Desktop/Nathan"
#python AltAnalyze.py --species Hs --platform "3'array" --expname test --channelToExtract green --FEdir /Users/saljh8/Downloads/AgllentTest/ --output /Users/saljh8/Downloads/AgllentTest/
global apt_location; global root_dir; global probability_statistic; global log_file; global summary_data_db; summary_data_db={}
###required
marker_finder='no'
manufacturer='Affymetrix'
constitutive_source='Ensembl'
ensembl_version = 'current'
species_code = None
species = None
main_input_folder = None
output_dir = None
array_type = None
input_annotation_file = None
groups_file = None
comps_file = None
input_cdf_file = None
exp_name = None
run_GOElite = 'yes'
visualize_qc_results = 'yes'
run_lineage_profiler = 'yes'
input_exp_file = ''
cel_file_dir = ''
input_stats_file = ''
input_filtered_dir = ''
external_annotation_dir = ''
xhyb_remove = 'no'
update_method = []
update_dbs = 'no'
analyze_all_conditions = 'no'
return_all = 'no'
additional_array_types = []
remove_intronic_junctions = 'no'
ignore_built_species = 'no'
build_exon_bedfile = 'no'
compendiumType = 'protein_coding'
probability_statistic = 'unpaired t-test'
specific_array_type = None
additional_resources = [None]
wpid = None
mod = 'Ensembl'
transpose = False
input_file_dir = None
denom_file_dir = None
image_export = []
    selected_species = ['Hs','Mm','Rn'] ### Species for which additional array types are currently supported
selected_platforms = ['AltMouse','exon','gene','junction']
returnPathways = 'no'
compendiumPlatform = 'gene'
exonMapFile = None
    platformType = None ### This option is used to store the original platform type
perform_alt_analysis = 'no'
mappedExonAnalysis = False ### Map the original IDs to the RNA-Seq exon database (when True)
microRNA_prediction_method = None
pipelineAnalysis = True
OntologyID=''
PathwaySelection=''
GeneSetSelection=''
interactionDirs=[]
inputType='ID list'
Genes=''
degrees='direct'
includeExpIDs=True
update_interactions=False
data_type = 'raw expression'
batch_effects = 'no'
channel_to_extract = None
normalization = False
justShowTheseIDs = ''
display=False
accessoryAnalysis=''
modelSize=None
geneModel=False
run_from_scratch = None
systemToUse = None ### For other IDs
custom_reference = False
multiThreading = True
genesToReport = 60
correlateAll = True
expression_data_format='log'
runICGS=False
IDtype=None
runKallisto = False
original_arguments = sys.argv
arguments=[]
for arg in original_arguments:
arg = string.replace(arg,'\xe2\x80\x9c','') ### These are non-standard forward quotes
arg = string.replace(arg,'\xe2\x80\x9d','') ### These are non-standard reverse quotes
arg = string.replace(arg,'\xe2\x80\x93','-') ### These are non-standard dashes
arg = string.replace(arg,'\x96','-') ### These are non-standard dashes
arg = string.replace(arg,'\x93','') ### These are non-standard forward quotes
arg = string.replace(arg,'\x94','') ### These are non-standard reverse quotes
arguments.append(arg)
print '\nArguments input:',arguments,'\n'
if '--help' in arguments[1:] or '--h' in arguments[1:]:
try: displayHelp() ### Print out a help file and quit
except Exception: print 'See: http://www.altanalyze.org for documentation and command-line help';sys.exit()
if 'AltAnalyze' in arguments[1]:
arguments = arguments[1:] ### Occurs on Ubuntu with the location of AltAnalyze being added to sys.argv (exclude this since no argument provided for this var)
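    ### All flags are long-form and require a value; unknown flags or missing values raise a getopt error that is reported below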
try:
options, remainder = getopt.getopt(arguments[1:],'', ['species=', 'mod=','elitepval=', 'elitepermut=',
'method=','zscore=','pval=','num=',
'runGOElite=','denom=','output=','arraytype=',
'celdir=','expdir=','output=','statdir=',
'filterdir=','cdfdir=','csvdir=','expname=',
'dabgp=','rawexp=','avgallss=','logexp=',
'inclraw=','runalt=','altmethod=','altp=',
'probetype=','altscore=','GEcutoff=',
'exportnormexp=','calcNIp=','runMiDAS=',
'GEcutoff=','GEelitepval=','mirmethod=','ASfilter=',
'vendor=','GEelitefold=','update=','version=',
'analyzeAllGroups=','GEeliteptype=','force=',
'resources_to_analyze=', 'dataToAnalyze=','returnAll=',
'groupdir=','compdir=','annotatedir=','additionalScore=',
'additionalAlgorithm=','noxhyb=','platform=','bedDir=',
'altpermutep=','altpermute=','removeIntronOnlyJunctions=',
'normCounts=','buildExonExportFile=','groupStat=',
'compendiumPlatform=','rpkm=','exonExp=','specificArray=',
'ignoreBuiltSpecies=','ORAstat=','outputQCPlots=',
'runLineageProfiler=','input=','image=', 'wpid=',
'additional=','row_method=','column_method=',
'row_metric=','column_metric=','color_gradient=',
'transpose=','returnPathways=','compendiumType=',
'exonMapFile=','geneExp=','labels=','contrast=',
'plotType=','geneRPKM=','exonRPKM=','runMarkerFinder=',
'update_interactions=','includeExpIDs=','degrees=',
'genes=','inputType=','interactionDirs=','GeneSetSelection=',
'PathwaySelection=','OntologyID=','dataType=','combat=',
'channelToExtract=','showIntrons=','display=','join=',
'uniqueOnly=','accessoryAnalysis=','inputIDType=','outputIDType=',
'FEdir=','channelToExtract=','AltResultsDir=','geneFileDir=',
'AltResultsDir=','modelSize=','geneModel=','reference=',
'multiThreading=','multiProcessing=','genesToReport=',
'correlateAll=','normalization=','justShowTheseIDs=',
'direction=','analysisType=','algorithm=','rho=',
'clusterGOElite=','geneSetName=','runICGS=','IDtype=',
                                             'CountsCutoff=','FoldDiff=','SamplesDiffering=','removeOutliers=',
'featurestoEvaluate=','restrictBy=','ExpressionCutoff=',
'excludeCellCycle=','runKallisto=','fastq_dir=','FDR='])
except Exception:
print traceback.format_exc()
print "There is an error in the supplied command-line arguments (each flag requires an argument)"; sys.exit()
for opt, arg in options:
#print [opt, arg]
if opt == '--species': species=arg
elif opt == '--arraytype':
if array_type != None: additional_array_types.append(arg)
else: array_type=arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--exonMapFile':
perform_alt_analysis = 'yes' ### Perform alternative exon analysis
exonMapFile = arg
elif opt == '--specificArray': specific_array_type = arg ### e.g., hGlue
elif opt == '--celdir': cel_file_dir=arg
elif opt == '--bedDir': cel_file_dir=arg
elif opt == '--FEdir': cel_file_dir = arg
elif opt == '--expdir': input_exp_file=arg
elif opt == '--statdir': input_stats_file=arg
elif opt == '--filterdir': input_filtered_dir=arg
elif opt == '--groupdir': groups_file=arg
elif opt == '--compdir': comps_file=arg
elif opt == '--cdfdir': input_cdf_file=arg
elif opt == '--csvdir': input_annotation_file=arg
elif opt == '--expname': exp_name=arg
elif opt == '--output': output_dir=arg
elif opt == '--vendor': manufacturer=arg
elif opt == '--runICGS': runICGS=True
elif opt == '--IDtype': IDtype=arg
elif opt == '--ignoreBuiltSpecies': ignore_built_species=arg
elif opt == '--platform':
if array_type != None: additional_array_types.append(arg)
else: array_type=arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--update': update_dbs='yes'; update_method.append(arg)
elif opt == '--version': ensembl_version = arg
elif opt == '--compendiumPlatform': compendiumPlatform=arg ### platform for which the LineageProfiler compendium is built on
elif opt == '--force': force=arg
elif opt == '--input': input_file_dir=arg; pipelineAnalysis = False ### If this option is entered, only perform the indicated analysis
elif opt == '--image': image_export.append(arg)
elif opt == '--wpid': wpid=arg
elif opt == '--mod': mod=arg
elif opt == '--runKallisto':
if arg == 'yes' or string.lower(arg) == 'true':
runKallisto = True
elif opt == '--fastq_dir':
input_fastq_dir = arg
elif opt == '--additional':
if additional_resources[0] == None:
additional_resources=[]
additional_resources.append(arg)
else:
additional_resources.append(arg)
elif opt == '--transpose':
if arg == 'True': transpose = True
elif opt == '--runLineageProfiler': ###Variable declared here and later (independent analysis here or pipelined with other analyses later)
run_lineage_profiler=arg
elif opt == '--compendiumType': ### protein-coding, ncRNA, or exon
compendiumType=arg
elif opt == '--denom':
denom_file_dir=arg ### Indicates that GO-Elite is run independent from AltAnalyze itself
elif opt == '--accessoryAnalysis':
accessoryAnalysis = arg
elif opt == '--channelToExtract': channel_to_extract=arg
elif opt == '--genesToReport': genesToReport = int(arg)
elif opt == '--correlateAll': correlateAll = True
elif opt == '--direction': direction = arg
elif opt == '--logexp': expression_data_format=arg
elif opt == '--geneRPKM': rpkm_threshold=arg
elif opt == '--multiThreading' or opt == '--multiProcessing':
multiThreading=arg
if multiThreading == 'yes': multiThreading = True
elif 'rue' in multiThreading: multiThreading = True
else: multiThreading = False
if 'other' in manufacturer or 'Other' in manufacturer:
### For other IDs
systemToUse = array_type
if array_type == None:
            print 'Please indicate an ID type as --platform when setting vendor equal to "Other IDs"'; sys.exit()
array_type = "3'array"
if array_type == 'RNASeq': manufacturer = array_type
if platformType == None: platformType = array_type
if perform_alt_analysis == 'yes':
if platform == "3'array":
mappedExonAnalysis = True
cel_file_dir = input_exp_file
exp_name = export.findFilename(input_exp_file)
exp_name = string.replace(exp_name,'.txt','')
exp_name = string.replace(exp_name,'exp.','')
input_exp_file = ''
        ### To perform alternative exon analyses for platforms without a dedicated database, appropriate mapping info or array type data must be supplied
### (will need to perform downstream testing for unsupported Affymetrix exon, gene and junction arrays)
if exonMapFile == None and specific_array_type == None and cel_file_dir == '':
            print_out = "\nUnable to run!!! Please designate either a specific platform (e.g., --specificArray hgU133_2), select CEL files, or an "
print_out += "exon-level mapping file location (--exonMapFile C:/mapping.txt) to perform alternative exon analyses for this platform."
### Will need to check here to see if the platform is supported (local or online files) OR wait until an error is encountered later
######## Perform analyses independent from AltAnalyze database centric analyses that require additional parameters
if len(image_export) > 0 or len(accessoryAnalysis)>0 or runICGS:
if runICGS:
#python AltAnalyze.py --runICGS yes --expdir "/Users/saljh8/Desktop/demo/Myoblast/ExpressionInput/exp.myoblast.txt" --platform "3'array" --species Hs --GeneSetSelection BioMarkers --PathwaySelection Heart --column_method hopach --rho 0.4 --ExpressionCutoff 200 --justShowTheseIDs "NKX2-5 T TBX5" --FoldDiff 10 --SamplesDiffering 3 --excludeCellCycle conservative
try: species = species
            except Exception: print 'Please designate a species before continuing (e.g., --species Hs)'
try: array_type = array_type
            except Exception: print 'Please designate a platform before continuing (e.g., --platform RNASeq)'
if len(cel_file_dir)>0:
values = species,exp_file_location_db,dataset,mlp_instance
StatusWindow(values,'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
else:
if len(input_exp_file) > 0: pass
                else: print 'Please indicate a source folder or expression file (e.g., --expdir /dataset/singleCells.txt)'
if array_type == 'Other' or 'Other' in array_type:
if ':' in array_type:
                    array_type, IDtype = string.split(array_type,':')
                    array_type = "3'array"
if IDtype == None: IDtype = manufacturer
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'yellow_black_blue'
contrast=3
vendor = manufacturer
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
excludeCellCycle = True
rho_cutoff = 0.4
restrictBy = 'protein_coding'
featurestoEvaluate = 'Genes'
ExpressionCutoff = 1
CountsCutoff = 1
FoldDiff = 2
SamplesDiffering = 3
JustShowTheseIDs=''
removeOutliers = False
PathwaySelection=[]
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method=arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method=arg
if column_method == 'None': column_method = None
elif opt == '--row_metric': row_metric=arg
elif opt == '--column_metric': column_metric=arg
elif opt == '--color_gradient': color_gradient=arg
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection.append(arg)
elif opt == '--genes': GeneSelection=arg
elif opt == '--ExpressionCutoff': ExpressionCutoff=arg
elif opt == '--normalization': normalization=arg
elif opt == '--justShowTheseIDs': justShowTheseIDs=arg
elif opt == '--rho': rho_cutoff=float(arg)
elif opt == '--clusterGOElite':clusterGOElite=float(arg)
elif opt == '--CountsCutoff':CountsCutoff=int(float(arg))
elif opt == '--FoldDiff':FoldDiff=int(float(arg))
elif opt == '--SamplesDiffering':SamplesDiffering=int(float(arg))
elif opt == '--removeOutliers':removeOutliers=arg
elif opt == '--featurestoEvaluate':featurestoEvaluate=arg
elif opt == '--restrictBy':restrictBy=arg
elif opt == '--excludeCellCycle':
excludeCellCycle=arg
if excludeCellCycle == 'False' or excludeCellCycle == 'no': excludeCellCycle = False
elif excludeCellCycle == 'True' or excludeCellCycle == 'yes' or excludeCellCycle == 'conservative': excludeCellCycle = True
elif opt == '--contrast':
try: contrast=float(arg)
except Exception: print '--contrast not a valid float';sys.exit()
elif opt == '--vendor': vendor=arg
elif opt == '--display':
if arg=='yes':
display=True
elif arg=='True':
display=True
else:
display=False
if len(PathwaySelection)==0: PathwaySelection=''
if len(GeneSetSelection)>0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species,array_type,vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
                gsp.setJustShowTheseIDs(justShowTheseIDs)
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(ExpressionCutoff,CountsCutoff,FoldDiff,SamplesDiffering,
removeOutliers,featurestoEvaluate,restrictBy,excludeCellCycle,column_metric,column_method,rho_cutoff)
import RNASeq
mlp_instance = mlp
if cel_file_dir != '':
expFile = cel_file_dir + '/ExpressionInput/'+ 'exp.'+exp_name+'.txt'
elif input_exp_file !='':
if 'ExpressionInput' in input_exp_file: expFile = input_exp_file
else:
### Copy over expression file to ExpressionInput
expdir2 = string.replace(input_exp_file,'exp.','')
                    root_dir = export.findParentDir(input_exp_file)
expFile = root_dir+'/ExpressionInput/exp.'+export.findFilename(expdir2)
export.copyFile(input_exp_file, expFile)
global log_file
root_dir = export.findParentDir(expFile)
root_dir = string.replace(root_dir,'/ExpressionInput','')
time_stamp = timestamp()
log_file = filepath(root_dir+'AltAnalyze_report-'+time_stamp+'.log')
log_report = open(log_file,'w'); log_report.close()
sys.stdout = Logger('')
count = verifyFileLength(expFile[:-4]+'-steady-state.txt')
if count>1:
expFile = expFile[:-4]+'-steady-state.txt'
elif array_type=='RNASeq':
                ### Indicates that the steady-state file doesn't exist. The exp. file may exist but could be junction-only, so it needs to be rebuilt from the BED files here
values = species,exp_file_location_db,dataset,mlp_instance
StatusWindow(values,'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
expFile = expFile[:-4]+'-steady-state.txt'
print [excludeCellCycle]
UI.RemotePredictSampleExpGroups(expFile, mlp_instance, gsp,(species,array_type)) ### proceed to run the full discovery analysis here!!!
sys.exit()
if 'WikiPathways' in image_export:
#python AltAnalyze.py --input /Users/test/input/criterion1.txt --image WikiPathways --mod Ensembl --species Hs --wpid WP536
if wpid==None:
print 'Please provide a valid WikiPathways ID (e.g., WP1234)';sys.exit()
if species==None:
print 'Please provide a valid species ID for an installed database (to install: --update Official --species Hs --version EnsMart62Plus)';sys.exit()
if input_file_dir==None:
            print 'Please provide a valid file location for your input IDs (also needs to include system code and value column)';sys.exit()
import WikiPathways_webservice
try:
print 'Attempting to output a WikiPathways colored image from user data'
print 'mod:',mod
print 'species_code:',species
print 'wpid:',wpid
print 'input GO-Elite ID file:',input_file_dir
graphic_link = WikiPathways_webservice.visualizePathwayAssociations(input_file_dir,species,mod,wpid)
except Exception,e:
if 'force_no_matching_error' in traceback.format_exc():
print '\nUnable to run!!! None of the input IDs mapped to this pathway\n'
elif 'IndexError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not have at least 3 columns, with the second column being system code\n'
elif 'ValueError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file error. Please check that you do not have extra rows with no data\n'
elif 'source_data' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not contain a valid system code\n'
elif 'goelite' in traceback.format_exc():
print '\nUnable to run!!! A valid species database needs to first be installed. For example, run:'
print 'python AltAnalyze.py --update Official --species Hs --version EnsMart65\n'
else:
print traceback.format_exc()
print '\nError generating the pathway "%s"' % wpid,'\n'
try:
printout = 'Finished exporting visualized pathway to:',graphic_link['WP']
print printout,'\n'
except Exception: None
sys.exit()
if 'MergeFiles' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis MergeFiles --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\tables"
files_to_merge=[]
join_option='Intersection'
uniqueOnly=False
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input': files_to_merge.append(arg)
if opt == '--join': join_option = arg
            if opt == '--uniqueOnly': uniqueOnly = arg
if len(files_to_merge)<2:
print 'Please designate two or more files to merge (--input)';sys.exit()
UI.MergeFiles(files_to_merge, join_option, uniqueOnly, output_dir, None)
sys.exit()
if 'IDTranslation' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis IDTranslation --inputIDType Symbol --outputIDType RefSeq --input "C:\file1.txt" --species Hs
inputIDType=None
outputIDType=None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--inputIDType': inputIDType = arg
if opt == '--outputIDType': outputIDType = arg
if inputIDType==None or outputIDType==None:
            print 'Please designate an input ID type and an output ID type (--inputIDType Ensembl --outputIDType Symbol)'; sys.exit()
if species==None:
            print "Please enter a valid species (--species)"; sys.exit()
UI.IDconverter(input_file_dir, species, inputIDType, outputIDType, None)
sys.exit()
if 'hierarchical' in image_export:
#python AltAnalyze.py --input "/Users/test/pluri.txt" --image hierarchical --row_method average --column_method single --row_metric cosine --column_metric euclidean --color_gradient red_white_blue --transpose False --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways --species Hs --platform exon --display false
if input_file_dir==None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';sys.exit()
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'red_black_sky'
contrast=2.5
vendor = 'Affymetrix'
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
rho = None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method=arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method=arg
if column_method == 'None': column_method = None
elif opt == '--row_metric': row_metric=arg
elif opt == '--column_metric': column_metric=arg
elif opt == '--color_gradient': color_gradient=arg
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection=arg
elif opt == '--genes': GeneSelection=arg
elif opt == '--OntologyID': OntologyID=arg
elif opt == '--normalization': normalization=arg
elif opt == '--justShowTheseIDs': justShowTheseIDs=arg
elif opt == '--rho': rho=arg
elif opt == '--clusterGOElite':clusterGOElite=arg
elif opt == '--contrast':
try: contrast=float(arg)
except Exception: print '--contrast not a valid float';sys.exit()
elif opt == '--vendor': vendor=arg
elif opt == '--display':
if arg=='yes':
display=True
elif arg=='True':
display=True
else:
display=False
if len(GeneSetSelection)>0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species,array_type,vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setOntologyID(OntologyID)
gsp.setTranspose(transpose)
gsp.setNormalize(normalization)
gsp.setJustShowTheseIDs(justShowTheseIDs)
try: gsp.setClusterGOElite(clusterGOElite)
except Exception: pass
if rho!=None:
try:
float(rho)
gsp.setRhoCutoff(rho)
except Exception: print 'Must enter a valid Pearson correlation cutoff (float)'
transpose = gsp ### this allows methods that don't transmit this object to also work
if row_method == 'no': row_method = None
if column_method == 'no': column_method = None
if len(GeneSetSelection)>0:
if species == None:
                print "Please enter a valid species (--species)"; sys.exit()
try:
files = unique.read_directory(input_file_dir+'/')
dir = input_file_dir
for file in files:
filename = dir+'/'+file
UI.createHeatMap(filename, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=display)
except Exception:
UI.createHeatMap(input_file_dir, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=display)
#import clustering; clustering.outputClusters([input_file_dir],[])
sys.exit()
if 'PCA' in image_export:
#AltAnalyze.py --input "/Users/nsalomonis/Desktop/folds.txt" --image PCA --plotType 3D --display True --labels yes
#--algorithm "t-SNE"
include_labels = 'yes'
plotType = '2D'
pca_algorithm = 'SVD'
geneSetName = None
zscore = True
colorByGene=None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--labels':
include_labels=arg
if include_labels == 'True' or include_labels == 'yes':
include_labels = 'yes'
else:
include_labels = 'no'
if opt == '--plotType': plotType=arg
if opt == '--algorithm': pca_algorithm=arg
if opt == '--geneSetName': geneSetName=arg
if opt == '--genes': colorByGene=arg
if opt == '--zscore':
if arg=='yes' or arg=='True' or arg == 'true':
zscore=True
else:
zscore=False
if opt == '--display':
if arg=='yes' or arg=='True' or arg == 'true':
display=True
if input_file_dir==None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';sys.exit()
UI.performPCA(input_file_dir, include_labels, pca_algorithm, transpose, None,
plotType=plotType, display=display, geneSetName=geneSetName, species=species, zscore=zscore, colorByGene=colorByGene)
sys.exit()
if 'VennDiagram' in image_export:
# AltAnalyze.py --image "VennDiagram" --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\graphs"
files_to_merge=[]
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input': files_to_merge.append(arg)
if opt == '--display':
if arg=='yes' or arg=='True' or arg == 'true':
display=True
if len(files_to_merge)<2:
print 'Please designate two or more files to compare (--input)';sys.exit()
UI.vennDiagram(files_to_merge, output_dir, None, display=display)
sys.exit()
if 'AltExonViewer' in image_export:
#python AltAnalyze.py --image AltExonViewer --AltResultsDir "C:\CP-hESC" --genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2" --species Hs --platform exon --dataType "splicing-index"
genes=[]
show_introns='no'
geneFileDir=''
        analysisType='plot'
        altresult_dir=None ### Ensures the missing --AltResultsDir check below reports an error rather than raising a NameError
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--genes':genes=arg
elif opt == '--dataType': data_type = arg
elif opt == '--showIntrons': show_introns = arg
elif opt == '--AltResultsDir': altresult_dir = arg
elif opt == '--geneFileDir': geneFileDir = arg
elif opt == '--analysisType': analysisType=arg
if altresult_dir == None:
print 'Please include the location of the AltResults directory (--AltResultsDir)'; sys.exit()
if len(genes)==0 and len(geneFileDir)==0:
print "Please indicate the genes (--genes) or gene file location (--geneFileDir) for AltExonViewer";sys.exit()
if species == None:
            print "Please enter a valid species (--species)"; sys.exit()
if array_type == None:
            print "Please enter a valid platform (--platform)"; sys.exit()
if 'AltResults' not in altresult_dir:
altresult_dir+='/AltResults/'
if 'Sashimi' in analysisType:
#python AltAnalyze.py --image AltExonViewer --AltResultsDir "/Users/saljh8/Desktop/Grimes/GEC14074/AltResults/" --genes "Dgat1 Dgat2 Tcf7l1" --species Mm --platform RNASeq --analysisType SashimiPlot
analysisType = 'Sashimi-Plot'
altresult_dir = string.split(altresult_dir,'AltResults')[0]
if len(geneFileDir)>0: genes = geneFileDir
geneFileDir=''
elif 'raw' in data_type: ### Switch directories if expression
altanalyze_results_folder = string.replace(altresult_dir,'AltResults','ExpressionInput')
altresult_dir = UI.getValidExpFile(altanalyze_results_folder)
if len(altresult_dir)==0:
print 'No valid expression input file (e.g., exp.MyExperiment.txt) found in',altanalyze_results_folder;sys.exit()
else:
altanalyze_results_folder = altresult_dir+'/RawSpliceData/'+species
try: altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception,e:
print "No files found in: "+altanalyze_results_folder; sys.exit()
if len(geneFileDir)>0:
try:
genes = UI.importGeneList(geneFileDir) ### list of gene IDs or symbols
except Exception:
### Can occur if a directory of files is selected
try:
files = unique.read_directory(geneFileDir+'/')
gene_string=''
for file in files:
if '.txt' in file:
filename = geneFileDir+'/'+file
genes = UI.importGeneList(filename) ### list of gene IDs or symbols
gene_string = gene_string+','+genes
print 'Imported genes from',file,'\n'
#print [altresult_dir];sys.exit()
UI.altExonViewer(species,platform,altresult_dir, gene_string, show_introns, analysisType, False)
except Exception: pass
sys.exit()
if len(genes)==0:
print 'Please list one or more genes (--genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2")'; sys.exit()
try: UI.altExonViewer(species,platform,altresult_dir, genes, show_introns, analysisType, False)
except Exception:
print traceback.format_exc()
sys.exit()
if 'network' in image_export:
#AltAnalyze.py --image network --species Hs --output "C:\GSE9440_RAW" --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--update_interactions': update_interactions=arg
elif opt == '--includeExpIDs': includeExpIDs=arg
elif opt == '--degrees': degrees=arg
elif opt == '--genes':
Genes=arg
inputType = 'IDs'
elif opt == '--inputType': inputType=arg
elif opt == '--interactionDirs': interactionDirs.append(arg)
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection=arg
elif opt == '--OntologyID': OntologyID=arg
elif opt == '--display': display=arg
if update_interactions == 'yes': update_interactions = True
else: update_interactions = False
if input_file_dir == None: pass
elif len(input_file_dir) == 0: input_file_dir = None
if len(input_exp_file) == 0: input_exp_file = None
if len(interactionDirs) == 0: interactionDirs=['WikiPathways']
if interactionDirs == ['all']:
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets','common-microRNATargets','all-microRNATargets','common-DrugBank','all-DrugBank']
if interactionDirs == ['main']:
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets']
if interactionDirs == ['confident']:
interactionDirs = ['WikiPathways','KEGG','TFTargets']
if len(Genes) == 0: Genes = None
if output_dir == None: pass
elif len(output_dir) == 0: output_dir = None
        if GeneSetSelection == 'None Selected': GeneSetSelection = None
if includeExpIDs=='yes': includeExpIDs = True
else: includeExpIDs = False
gsp = UI.GeneSelectionParameters(species,array_type,manufacturer)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(Genes)
gsp.setOntologyID(OntologyID)
gsp.setIncludeExpIDs(includeExpIDs)
root = ''
if species == None:
print 'Please designate a species (--species).'; sys.exit()
if output_dir == None:
            print 'Please designate an output directory (--output)'; sys.exit()
if input_file_dir !=None:
if '.txt' in input_file_dir or '.sif' in input_file_dir:
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
else:
parent_dir = input_file_dir
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir+'/'+file
try:
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
except Exception:
print file, 'failed to produce network'
else:
UI.networkBuilder(None,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
sys.exit()
########## Begin database dependent AltAnalyze workflows
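    ### The branches below handle gene database version selection, database archiving/cleanup, species and platform database building (--update), database packaging (--update package) and marker identification (--update markers)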
if ensembl_version != 'current' and 'markers' not in update_method:
dbversion = string.replace(ensembl_version,'EnsMart','')
UI.exportDBversion('EnsMart'+dbversion)
gene_database = unique.getCurrentGeneDatabaseVersion()
print 'Current database version:',gene_database
if array_type == None and update_dbs != 'yes' and denom_file_dir == None:
print "Please specify an array or data type (e.g., RNASeq, exon, gene, junction, AltMouse, 3'array)."; sys.exit()
if 'archive' in update_method:
###
print 'Archiving databases', ensembl_version
try: archive_dir = 'ArchiveDBs/EnsMart'+ensembl_version+'/archive'; export.createDirPath(filepath(archive_dir))
except Exception: null = [] ### directory already exists
dirs = unique.read_directory('/ArchiveDBs/EnsMart'+ensembl_version)
print len(dirs), dirs
import shutil
for species_dir in dirs:
try:
#print '/ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip'
src = filepath('ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip')
dstn = filepath('ArchiveDBs/EnsMart'+ensembl_version+'/archive/'+species_dir+'_RNASeq.zip')
#export.copyFile(src, dstn)
shutil.move(src, dstn)
try:
srcj = string.replace(src,'RNASeq.','junction.'); dstnj = string.replace(dstn,'RNASeq.','junction.')
shutil.move(srcj, dstnj)
except Exception: null=[]
try:
src = string.replace(src,'_RNASeq.','.'); dstn = string.replace(dstn,'_RNASeq.','.')
shutil.move(src, dstn)
except Exception: null=[]
except Exception: null=[]
sys.exit()
if update_dbs == 'yes' and 'Official' not in update_method:
if 'cleanup' in update_method:
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
print 'Deleting EnsemblSQL directory for all species, ensembl version',ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/ensembl/'+species+'/EnsemblSQL')
existing_species_dirs = unique.read_directory('/AltDatabase')
print 'Deleting SequenceData directory for all species, ensembl version',ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/'+species+'/SequenceData')
print 'Finished...exiting'
sys.exit()
if 'package' not in update_method and 'markers' not in update_method:
### Example:
### python AltAnalyze.py --species all --arraytype all --update all --version 60
### tr -d \\r < AltAnalyze.py > AltAnalyze_new.py
### chmod +x AltAnalyze_new.py
### nohup ./AltAnalyze.py --update all --species Mm --arraytype gene --arraytype exon --version 60 2>&1 > nohup_v60_Mm.txt
if array_type == 'all' and (species == 'Mm' or species == 'all'): array_type = ['AltMouse','exon','gene','junction','RNASeq']
elif array_type == 'all' and (species == 'Hs' or species == 'Rn'): array_type = ['exon','gene','junction','RNASeq']
else: array_type = [array_type]+additional_array_types
if species == 'all' and 'RNASeq' not in array_type: species = selected_species ### just analyze the species for which multiple platforms are supported
if species == 'selected': species = selected_species ### just analyze the species for which multiple platforms are supported
elif species == 'all':
all_supported_names = {}; all_species_names={}
species_names = UI.getSpeciesInfo()
for species in species_names: all_supported_names[species_names[species]]=species
import EnsemblSQL
child_dirs, ensembl_species, ensembl_versions = EnsemblSQL.getCurrentEnsemblSpecies('release-'+ensembl_version)
for ens_species in ensembl_species:
ens_species = string.replace(ens_species,'_',' ')
if ens_species in all_supported_names:
all_species_names[all_supported_names[ens_species]]=[]
del all_species_names['Hs']
del all_species_names['Mm']
del all_species_names['Rn']
"""
del all_species_names['Go']
del all_species_names['Bt']
del all_species_names['Sc']
del all_species_names['Ss']
del all_species_names['Pv']
del all_species_names['Pt']
del all_species_names['La']
del all_species_names['Tt']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Pb']
del all_species_names['Pc']
del all_species_names['Ec']
del all_species_names['Tb']
del all_species_names['Tg']
del all_species_names['Dn']
del all_species_names['Do']
del all_species_names['Tn']
del all_species_names['Dm']
del all_species_names['Oc']
del all_species_names['Og']
del all_species_names['Fc']
del all_species_names['Dr']
del all_species_names['Me']
del all_species_names['Cp']
del all_species_names['Tt']
del all_species_names['La']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Et'] ### No alternative isoforms?
del all_species_names['Pc']
del all_species_names['Tb']
del all_species_names['Fc']
del all_species_names['Sc']
del all_species_names['Do']
del all_species_names['Dn']
del all_species_names['Og']
del all_species_names['Ga']
del all_species_names['Me']
del all_species_names['Ml']
del all_species_names['Mi']
del all_species_names['St']
del all_species_names['Sa']
del all_species_names['Cs']
del all_species_names['Vp']
del all_species_names['Ch']
del all_species_names['Ee']
del all_species_names['Ac']"""
sx=[]; all_species_names2=[] ### Ensure that the core selected species are run first
for species in selected_species:
if species in all_species_names: sx.append(species)
for species in all_species_names:
if species not in selected_species: all_species_names2.append(species)
all_species_names = sx+all_species_names2
species = all_species_names
else: species = [species]
update_uniprot='no'; update_ensembl='no'; update_probeset_to_ensembl='no'; update_domain='no'; update_miRs = 'no'; genomic_build = 'new'; update_miR_seq = 'yes'
if 'all' in update_method:
update_uniprot='yes'; update_ensembl='yes'; update_probeset_to_ensembl='yes'; update_domain='yes'; update_miRs = 'yes'
if 'UniProt' in update_method: update_uniprot = 'yes'
if 'Ensembl' in update_method: update_ensembl = 'yes'
if 'Probeset' in update_method or 'ExonAnnotations' in update_method: update_probeset_to_ensembl = 'yes'
if 'Domain' in update_method:
update_domain = 'yes'
try: from Bio import Entrez #test this
            except Exception: print 'The dependent module Bio is not installed or not accessible through the default python interpreter. Exiting AltAnalyze.'; sys.exit()
if 'miRBs' in update_method or 'miRBS' in update_method: update_miRs = 'yes'
if 'NewGenomeBuild' in update_method: genomic_build = 'new'
if 'current' in ensembl_version: print "Please specify an Ensembl version number (e.g., 60) before proceeding with the update.";sys.exit()
try: force = force ### Variable is not declared otherwise
except Exception: force = 'yes'; print 'force:',force
existing_species_dirs={}
update_all = 'no' ### We don't pass this as yes, in order to skip certain steps when multiple array types are analyzed (others are specified above)
try: print "Updating AltDatabase the following array_types",string.join(array_type),"for the species",string.join(species)
except Exception: print 'Please designate a valid platform/array_type (e.g., exon) and species code (e.g., Mm).'
for specific_species in species:
for platform_name in array_type:
if platform_name == 'AltMouse' and specific_species == 'Mm': proceed = 'yes'
elif platform_name == 'exon' or platform_name == 'gene':
import ExonArrayEnsemblRules
#### Check to see if the probeset.csv file is present
#try: probeset_transcript_file = ExonArrayEnsemblRules.getDirectoryFiles('/AltDatabase/'+specific_species+'/'+platform_name)
                    #except Exception: print "Affymetrix probeset.csv annotation file is not found. You must save this to",'/AltDatabase/'+specific_species+'/'+platform_name,'before updating (unzipped).'; sys.exit()
proceed = 'yes'
elif platform_name == 'junction' and (specific_species == 'Hs' or specific_species == 'Mm'): proceed = 'yes'
elif platform_name == 'RNASeq': proceed = 'yes'
else: proceed = 'no'
if proceed == 'yes':
print "Analyzing", specific_species, platform_name
if (platform_name != array_type[0]) and len(species)==1:
update_uniprot = 'no'; update_ensembl = 'no'; update_miR_seq = 'no' ### Don't need to do this twice in a row
print 'Skipping ensembl, uniprot and mir-sequence file import updates since already completed for this species',array_type,platform_name
if ignore_built_species == 'yes': ### Useful for when building all species for a new database build
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl') ### call this here to update with every species - if running multiple instances
if specific_array_type != None and specific_array_type != platform_name: platform_name+='|'+specific_array_type ### For the hGlue vs. JAY arrays
if specific_species not in existing_species_dirs: ### Useful when running multiple instances of AltAnalyze to build all species
print 'update_ensembl',update_ensembl
print 'update_uniprot',update_uniprot
print 'update_probeset_to_ensembl',update_probeset_to_ensembl
print 'update_domain',update_domain
print 'update_miRs',update_miRs
update.executeParameters(specific_species,platform_name,force,genomic_build,update_uniprot,update_ensembl,update_probeset_to_ensembl,update_domain,update_miRs,update_all,update_miR_seq,ensembl_version)
else: print 'ignoring',specific_species
sys.exit()
if 'package' in update_method:
### Example: python AltAnalyze.py --update package --species all --platform all --version 65
if ensembl_version == 'current': print '\nPlease specify version of the database to package (e.g., --version 60).'; sys.exit()
ensembl_version = 'EnsMart'+ensembl_version
### Get all possible species
species_names = UI.getSpeciesInfo(); possible_species={}
possible_species = species_names
possible_arrays = ['exon','gene','junction','AltMouse','RNASeq']
try:
if species == 'all': possible_species = possible_species
elif species == 'selected': possible_species = selected_species
else: possible_species = [species]
except Exception: species = possible_species
if array_type == None or array_type == 'all': possible_arrays = possible_arrays
else: possible_arrays = [array_type]+additional_array_types
species_to_package={}
dirs = unique.read_directory('/AltDatabase/'+ensembl_version)
#print possible_arrays, possible_species; sys.exit()
for species_code in dirs:
if species_code in possible_species:
array_types = unique.read_directory('/AltDatabase/'+ensembl_version+'/'+species_code)
for arraytype in array_types:
if arraytype in possible_arrays:
if species_code in possible_species:
array_types = unique.read_directory('/AltDatabase/'+ensembl_version+'/'+species_code)
try: species_to_package[species_code].append(arraytype)
except Exception: species_to_package[species_code] = [arraytype]
species_to_package = eliminate_redundant_dict_values(species_to_package)
for species in species_to_package:
files_to_copy =[species+'_Ensembl_domain_aligning_probesets.txt']
files_to_copy+=[species+'_Ensembl_indirect_domain_aligning_probesets.txt']
files_to_copy+=[species+'_Ensembl_probesets.txt']
files_to_copy+=[species+'_Ensembl_exons.txt']
#files_to_copy+=[species+'_Ensembl_junctions.txt']
files_to_copy+=[species+'_exon_core.mps']
files_to_copy+=[species+'_exon_extended.mps']
files_to_copy+=[species+'_exon_full.mps']
files_to_copy+=[species+'_gene_core.mps']
files_to_copy+=[species+'_gene_extended.mps']
files_to_copy+=[species+'_gene_full.mps']
files_to_copy+=[species+'_gene-exon_probesets.txt']
files_to_copy+=[species+'_probes_to_remove.txt']
files_to_copy+=[species+'_probeset-probes.txt']
files_to_copy+=[species+'_probeset_microRNAs_any.txt']
files_to_copy+=[species+'_probeset_microRNAs_multiple.txt']
files_to_copy+=['probeset-domain-annotations-exoncomp.txt']
files_to_copy+=['probeset-protein-annotations-exoncomp.txt']
#files_to_copy+=['probeset-protein-dbase_exoncomp.txt']
files_to_copy+=['SEQUENCE-protein-dbase_exoncomp.txt']
files_to_copy+=[species+'_Ensembl_junction_probesets.txt']
files_to_copy+=[species+'_Ensembl_AltMouse_probesets.txt']
files_to_copy+=[species+'_RNASeq-exon_probesets.txt']
files_to_copy+=[species+'_junction-exon_probesets.txt']
files_to_copy+=[species+'_junction_all.mps']
files_to_copy+=['platform.txt'] ### Indicates the specific platform for an array type (e.g., HJAY for junction or hGlue for junction)
files_to_copy+=[species+'_junction_comps_updated.txt']
files_to_copy+=['MASTER-probeset-transcript.txt']
files_to_copy+=['AltMouse-Ensembl.txt']
files_to_copy+=['AltMouse_junction-comparisons.txt']
files_to_copy+=['AltMouse_gene_annotations.txt']
files_to_copy+=['AltMouse_annotations.txt']
common_to_copy =['uniprot/'+species+'/custom_annotations.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl-annotations.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_microRNA-Ensembl.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_transcript-biotypes.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt']
common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'Ensembl_Protein')
common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'ProteinFeatures')
common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'ProteinCoordinates')
common_to_copy+= searchDirectory("AltDatabase/uniprot/"+species+"/",'FeatureCoordinate')
supported_arrays_present = 'no'
for arraytype in selected_platforms:
if arraytype in species_to_package[species]: supported_arrays_present = 'yes' #Hence a non-RNASeq platform is present
if supported_arrays_present == 'yes':
for file in common_to_copy:
ir = 'AltDatabase/'+ensembl_version+'/'
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'
export.copyFile(ir+file, er+file)
if 'RNASeq' in species_to_package[species]:
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_junction.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_exon.txt']
for file in common_to_copy:
ir = 'AltDatabase/'+ensembl_version+'/'
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'
if species in selected_species:
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/' ### This allows us to build the package archive in a separate directory for selected species, so separate but overlapping content can be packaged
export.copyFile(ir+file, er+file)
for array_type in species_to_package[species]:
ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+'/'
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'+species+'/'+array_type+'/'
if array_type == 'junction':
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+array_type+'/'
if array_type == 'RNASeq' and species in selected_species:
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/'+species+'/'+array_type+'/'
for file in files_to_copy:
export_path=[] ### Initialize per file so the length check below cannot hit an undefined name (matches the later loops)
if array_type == 'RNASeq': file=string.replace(file,'_updated.txt','.txt')
filt_file = string.replace(file ,'.txt','-filtered.txt')
try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
except Exception:
try: export.copyFile(ir+file, er+file); export_path = er+file
except Exception: null = [] ### File not found in directory
if len(export_path)>0:
if 'AltMouse' in export_path or 'probes_' in export_path:
export.cleanFile(export_path)
if array_type == 'junction':
subdir = '/exon/'
ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+subdir
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+array_type+subdir
for file in files_to_copy:
export_path=[]
filt_file = string.replace(file ,'.txt','-filtered.txt')
try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
except Exception:
try: export.copyFile(ir+file, er+file); export_path = er+file
except Exception: null = [] ### File not found in directory
if array_type == 'RNASeq':
subdir = '/junction/'
ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+subdir
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'+species+'/'+array_type+subdir
if species in selected_species:
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/'+species+'/'+array_type+subdir
for file in files_to_copy:
if 'SEQUENCE-protein-dbase' not in file and 'domain_aligning' not in file: ### This data is now combined into the main file
export_path=[]
filt_file = string.replace(file ,'.txt','-filtered.txt')
try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
except Exception:
try: export.copyFile(ir+file, er+file); export_path = er+file
except Exception: null = [] ### File not found in directory
if 'RNASeq' in species_to_package[species]:
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version
dst = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+species+'_RNASeq.zip'
if species in selected_species:
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version
update.zipDirectory(src); print 'Zipping',species, array_type, dst
os.rename(src+'.zip', dst)
if supported_arrays_present == 'yes':
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version
dst = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+species+'.zip'
update.zipDirectory(src); print 'Zipping',species, array_type, dst
os.rename(src+'.zip', dst)
if 'junction' in species_to_package[species]:
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/junction'
dst = string.replace(src,'junction',species+'_junction.zip')
update.zipDirectory(src); print 'Zipping',species+'_junction'
os.rename(src+'.zip', dst)
sys.exit()
if 'markers' in update_method:
if species == None or platform == None:
print "WARNING! A species and platform (e.g., exon, junction, 3'array or RNASeq) must be defined to identify markers.";sys.exit()
elif input_exp_file == '':
print "WARNING! A input expression file must be supplied (e.g., ExpressionOutput/DATASET.YourExperimentName.txt) for this analysis.";sys.exit()
else:
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/ExpressionInput/exp.meta.txt"
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/AltResults/RawSpliceData/Mm/splicing-index/meta.txt"
#python AltAnalyze.py --update markers --platform "3'array" --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/U133/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --compendiumType ncRNA --platform "exon" --expdir "/home/socr/c/users2/salomoni/conklin/nsalomonis/normalization/Hs_Exon-TissueAtlas/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --platform RNASeq --species Mm --geneRPKM 1 --expdir /Users/saljh8/Desktop/Grimes/MergedRSEM/DN-Analysis/ExpressionInput/exp.DN.txt --genesToReport 200
"""The markerFinder module:
1) takes an input ExpressionOutput file (DATASET.YourExperimentName.txt)
2) extracts group average expression and saves it as AVERAGE.YourExperimentName.txt in the ExpressionOutput directory
3) re-imports AVERAGE.YourExperimentName.txt
4) correlates the average expression of each gene to an idealized profile to derive a Pearson correlation coefficient
5) identifies optimal markers based on these correlations for each tissue
6) exports an expression file with just these marker genes and tissues
This module can perform these analyses on protein-coding or ncRNAs and can segregate the cell/tissue groups into clusters
when a group notation is present in the sample name (e.g., 0~Heart, 0~Brain, 1~Stem Cell)"""
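### Illustration only (not executed): step 4 above computes, for every gene, the Pearson correlation of its
### group averages against an idealized marker profile. A minimal sketch, assuming numpy and three groups:
### import numpy as np
### group_means = np.array([8.2, 3.1, 3.0]) ### hypothetical average expression per group for one gene
### ideal_profile = np.array([1.0, 0.0, 0.0]) ### marker "on" in the first group only
### r = np.corrcoef(group_means, ideal_profile)[0][1] ### Pearson correlation coefficient used for ranking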
import markerFinder
if 'AltResults' in input_exp_file and 'Clustering' not in input_exp_file:
### This applies to a file composed of exon-level normalized intensities (calculate average group expression)
markerFinder.getAverageExonExpression(species,platform,input_exp_file)
if 'Raw' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'Raw','AVERAGE')
else:
group_exp_file = string.replace(input_exp_file,'FullDatasets','AVERAGE-FullDatasets')
altexon_correlation_file = markerFinder.analyzeData(group_exp_file,species,platform,compendiumType,geneToReport=genesToReport,correlateAll=correlateAll,AdditionalParameters=fl)
markerFinder.getExprValsForNICorrelations(platform,altexon_correlation_file,group_exp_file)
else:
### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
import collections
try: test_ordereddict=collections.OrderedDict()
except Exception:
try: import ordereddict
except Exception:
### This is needed to re-order the average file so that the groups are sequentially ordered when analyzing clustered groups (0~)
print 'Warning!!! markerFinder requires Python 2.7 or greater (Python 3.x is not supported)'
print 'It requires collections.OrderedDict (alternatively, install the ordereddict library). To call 2.7: /usr/bin/python2.7'
sys.exit()
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,platform) ### Either way, make an average annotated file from the DATASET file
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'DATASET','AVERAGE')
else:
group_exp_file = (input_exp_file,output_dir) ### still analyze the primary sample
except Exception:
### Work around when performing this analysis on an alternative exon input cluster file
group_exp_file = input_exp_file
fl = UI.ExpressionFileLocationData(input_exp_file,'','',''); fl.setOutputDir(export.findParentDir(export.findParentDir(input_exp_file)[:-1]))
try: fl.setSpecies(species); fl.setVendor(vendor)
except Exception: pass
try:
rpkm_threshold = float(rpkm_threshold) ### If supplied, for any platform, use it
fl.setRPKMThreshold(rpkm_threshold)
except Exception: pass
if platform=='RNASeq':
try: rpkm_threshold = float(rpkm_threshold)
except Exception: rpkm_threshold = 1.0
fl.setRPKMThreshold(rpkm_threshold)
try: correlationDirection = direction ### correlate to a positive or inverse negative in silico artificial pattern
except Exception: correlationDirection = 'up'
fl.setCorrelationDirection(correlationDirection)
if expression_data_format == 'non-log': logTransform = True
else: logTransform = False
if 'topSplice' in input_exp_file:
markerFinder.filterRNASeqSpliceEvents(species,platform,fl,input_exp_file)
sys.exit()
if 'stats.' in input_exp_file:
markerFinder.filterDetectionPvalues(species,platform,fl,input_exp_file)
sys.exit()
else:
markerFinder.analyzeData(group_exp_file,species,platform,compendiumType,geneToReport=genesToReport,correlateAll=correlateAll,AdditionalParameters=fl,logTransform=logTransform)
try: fl.setVendor(manufacturer)
except Exception:
print '--vendor not indicated by user... assuming Affymetrix'
fl.setVendor('Affymetrix')
try: markerFinder.generateMarkerHeatMaps(fl,array_type,convertNonLogToLog=logTransform,Species=species)
except Exception: print traceback.format_exc()
print 'Cell/Tissue marker classification analysis finished';sys.exit()
if 'EnsMart' in ensembl_version:
UI.exportDBversion(ensembl_version)
annotation_found = verifyFile(input_annotation_file)
proceed = 'no'
if 'Official' not in update_method and denom_file_dir == None: ### If running GO-Elite independent of AltAnalyze (see below GO_Elite call)
try:
time_stamp = timestamp()
if len(cel_file_dir)>0:
if output_dir == None:
output_dir = cel_file_dir
print "Setting output directory to the input path:", output_dir
if output_dir == None and len(input_filtered_dir)>0:
output_dir = input_filtered_dir
if '/' == output_dir[-1] or '\\' in output_dir[-2]: null=[]
else: output_dir +='/'
log_file = filepath(output_dir+'AltAnalyze_report-'+time_stamp+'.log')
log_report = open(log_file,'w'); log_report.close()
sys.stdout = Logger('')
except Exception,e:
print e
print 'Please designate an output directory before proceeding (e.g., --output "C:\RNASeq")';sys.exit()
if mappedExonAnalysis:
array_type = 'RNASeq' ### Although this is not the actual platform, the resulting data will be treated as RNA-Seq with parameters most suitable for arrays
if len(external_annotation_dir)>0:
run_from_scratch = 'Annotate External Results'
if channel_to_extract != None:
run_from_scratch = 'Process Feature Extraction files' ### Agilent Feature Extraction files as input for normalization
manufacturer = 'Agilent'
constitutive_source = 'Agilent'
expression_threshold = 'NA'
perform_alt_analysis = 'NA'
if len(input_filtered_dir)>0:
run_from_scratch ='Process AltAnalyze filtered'; proceed='yes'
if len(input_exp_file)>0:
run_from_scratch = 'Process Expression file'; proceed='yes'
input_exp_file = string.replace(input_exp_file,'\\','/') ### Windows convention is \ rather than /, but works with /
ief_list = string.split(input_exp_file,'/')
if len(output_dir)>0: parent_dir = output_dir
else: parent_dir = string.join(ief_list[:-1],'/')
exp_name = ief_list[-1]
if len(cel_file_dir)>0 or runKallisto == True:
# python AltAnalyze.py --species Mm --platform RNASeq --runKallisto yes --expname test
if exp_name == None:
print "No experiment name defined. Please sumbit a name (e.g., --expname CancerComp) before proceeding."; sys.exit()
else:
dataset_name = 'exp.'+exp_name+'.txt'; exp_file_dir = filepath(output_dir+'/ExpressionInput/'+dataset_name)
if runKallisto:
run_from_scratch = 'Process RNA-seq reads'
elif run_from_scratch!= 'Process Feature Extraction files':
run_from_scratch = 'Process CEL files'; proceed='yes'
if array_type == 'RNASeq': file_ext = '.BED'
else: file_ext = '.CEL'
try: cel_files,cel_files_fn = UI.identifyCELfiles(cel_file_dir,array_type,manufacturer)
except Exception,e:
print e
if mappedExonAnalysis: pass
else: print "No",file_ext,"files found in the directory:",cel_file_dir;sys.exit()
if array_type != 'RNASeq': cel_file_list_dir = UI.exportCELFileList(cel_files_fn,cel_file_dir)
if groups_file != None and comps_file != None:
try: export.copyFile(groups_file, string.replace(exp_file_dir,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: export.copyFile(comps_file, string.replace(exp_file_dir,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(exp_file_dir,'exp.','groups.')
comps_file = string.replace(exp_file_dir,'exp.','comps.')
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
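### For reference, a groups.* file is typically tab-delimited with one sample per row
### (sample file name, group number, group name), e.g. (hypothetical values):
### Sample1.CEL	1	Control
### Sample2.CEL	2	Treated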
if array_type != 'RNASeq' and manufacturer!= 'Agilent':
"""Determine if Library and Annotations for the array exist, if not, download or prompt for selection"""
try:
### For the HGLUE and HJAY arrays, this step is critical in order to have the command-line AltAnalyze download the appropriate junction database (determined from specific_array_type)
specific_array_types,specific_array_type = UI.identifyArrayType(cel_files_fn)
num_array_types = len(specific_array_types)
except Exception:
null=[]; num_array_types=1; specific_array_type=None
if array_type == 'exon':
if species == 'Hs': specific_array_type = 'HuEx-1_0-st-v2'
if species == 'Mm': specific_array_type = 'MoEx-1_0-st-v2'
if species == 'Rn': specific_array_type = 'RaEx-1_0-st-v2'
elif array_type == 'gene':
if species == 'Hs': specific_array_type = 'HuGene-1_0-st-v1'
if species == 'Mm': specific_array_type = 'MoGene-1_0-st-v1'
if species == 'Rn': specific_array_type = 'RaGene-1_0-st-v1'
elif array_type == 'AltMouse': specific_array_type = 'altMouseA'
"""
elif array_type == 'junction':
if species == 'Mm': specific_array_type = 'MJAY'
if species == 'Hs': specific_array_type = 'HJAY'
"""
supproted_array_db = UI.importSupportedArrayInfo()
if specific_array_type in supproted_array_db and input_cdf_file == None and input_annotation_file == None:
sa = supproted_array_db[specific_array_type]; species = sa.Species(); array_type = sa.ArrayType()
input_cdf_file, input_annotation_file, bgp_file, clf_file = UI.getAffyFilesRemote(specific_array_type,array_type,species)
else: array_type = "3'array"
cdf_found = verifyFile(input_cdf_file)
annotation_found = verifyFile(input_annotation_file)
if input_cdf_file == None:
print [specific_array_type], 'not currently supported... Please provide CDF to AltAnalyze (commandline or GUI) or manually add to AltDatabase/affymetrix/LibraryFiles'; sys.exit()
if cdf_found != "found":
### Copy valid Library files to a local AltAnalyze database directory
input_cdf_file_lower = string.lower(input_cdf_file)
if array_type == "3'array":
if '.cdf' in input_cdf_file_lower:
clf_file='';bgp_file=''; assinged = 'yes'
###Thus the CDF file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_cdf_file,'/'); cdf_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
destination_parent = osfilepath(destination_parent+cdf_short)
info_list = input_cdf_file,destination_parent; UI.StatusWindow(info_list,'copy')
else: print "Valid CDF file not found. Exiting program.";sys.exit()
else:
if '.pgf' in input_cdf_file_lower:
###Check to see if the clf and bgp files are present in this directory
icf_list = string.split(input_cdf_file,'/'); parent_dir = string.join(icf_list[:-1],'/'); cdf_short = icf_list[-1]
clf_short = string.replace(cdf_short,'.pgf','.clf')
kil_short = string.replace(cdf_short,'.pgf','.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction': bgp_short = string.replace(cdf_short,'.pgf','.antigenomic.bgp')
else: bgp_short = string.replace(cdf_short,'.pgf','.bgp')
dir_list = read_directory(parent_dir)
if clf_short in dir_list and bgp_short in dir_list:
pgf_file = input_cdf_file
clf_file = string.replace(pgf_file,'.pgf','.clf')
kil_file = string.replace(pgf_file,'.pgf','.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction': bgp_file = string.replace(pgf_file,'.pgf','.antigenomic.bgp')
else: bgp_file = string.replace(pgf_file,'.pgf','.bgp')
assinged = 'yes'
###Thus the PGF file was confirmed, so copy it over to AltDatabase
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
info_list = input_cdf_file,osfilepath(destination_parent+cdf_short); UI.StatusWindow(info_list,'copy')
info_list = clf_file,osfilepath(destination_parent+clf_short); UI.StatusWindow(info_list,'copy')
info_list = bgp_file,osfilepath(destination_parent+bgp_short); UI.StatusWindow(info_list,'copy')
if 'Glue' in pgf_file:
info_list = kil_file,osfilepath(destination_parent+kil_short); UI.StatusWindow(info_list,'copy')
if annotation_found != "found" and update_dbs == 'no' and array_type != 'RNASeq' and denom_file_dir == None and manufacturer != 'Agilent':
### Copy valid Annotation files to a local AltAnalyze database directory
try:
input_annotation_lower = string.lower(input_annotation_file)
if '.csv' in input_annotation_lower:
assinged = 'yes'
###Thus the CSV annotation file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_annotation_file,'/'); csv_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/'+species+'/'
info_list = input_annotation_file,filepath(destination_parent+csv_short); UI.StatusWindow(info_list,'copy')
except Exception: print "No Affymetrix annotation file provided. AltAnalyze will use any .csv annotations files in AltDatabase/Affymetrix/"+species
if 'Official' in update_method and species != None:
proceed = 'yes'
elif array_type != None and species != None:
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults(array_type,species)
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
use_direct_domain_alignments_only,microRNA_prediction_method = functional_analysis_defaults
analysis_method, additional_algorithms, filter_probeset_types, analyze_all_conditions, p_threshold, alt_exon_fold_variable, additional_score, permute_p_threshold, gene_expression_cutoff, remove_intronic_junctions, perform_permutation_analysis, export_NI_values, run_MiDAS, calculate_normIntensity_p, filter_for_AS = alt_exon_defaults
dabg_p, rpkm_threshold, gene_exp_threshold, exon_exp_threshold, exon_rpkm_threshold, expression_threshold, perform_alt_analysis, analyze_as_groups, expression_data_format, normalize_feature_exp, normalize_gene_data, avg_all_for_ss, include_raw_data, probability_statistic, FDR_statistic, batch_effects, marker_finder, visualize_qc_results, run_lineage_profiler, null = expr_defaults
elif denom_file_dir != None and species != None:
proceed = 'yes' ### Only run GO-Elite
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('RNASeq',species) ### platform not relevant
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
else:
print 'No species defined. Please include the species code (e.g., "--species Hs") and array type (e.g., "--arraytype exon") before proceeding.'
print '\nAlso check the printed arguments above to see if there are formatting errors, such as bad quotes.'; sys.exit()
array_type_original = array_type
#if array_type == 'gene': array_type = "3'array"
for opt, arg in options:
if opt == '--runGOElite': run_GOElite=arg
elif opt == '--outputQCPlots': visualize_qc_results=arg
elif opt == '--runLineageProfiler': run_lineage_profiler=arg
elif opt == '--elitepermut': goelite_permutations=arg
elif opt == '--method': filter_method=arg
elif opt == '--zscore': z_threshold=arg
elif opt == '--elitepval': p_val_threshold=arg
elif opt == '--num': change_threshold=arg
elif opt == '--dataToAnalyze': resources_to_analyze=arg
elif opt == '--GEelitepval': ge_pvalue_cutoffs=arg
elif opt == '--GEelitefold': ge_fold_cutoffs=arg
elif opt == '--GEeliteptype': ge_ptype=arg
elif opt == '--ORAstat': ORA_algorithm=arg
elif opt == '--returnPathways': returnPathways=arg
elif opt == '--FDR': FDR_statistic=arg
elif opt == '--dabgp': dabg_p=arg
elif opt == '--rawexp': expression_threshold=arg
elif opt == '--geneRPKM': rpkm_threshold=arg
elif opt == '--exonRPKM': exon_rpkm_threshold=arg
elif opt == '--geneExp': gene_exp_threshold=arg
elif opt == '--exonExp': exon_exp_threshold=arg
elif opt == '--groupStat': probability_statistic=arg
elif opt == '--avgallss': avg_all_for_ss=arg
elif opt == '--logexp': expression_data_format=arg
elif opt == '--inclraw': include_raw_data=arg
elif opt == '--combat': batch_effects=arg
elif opt == '--runalt': perform_alt_analysis=arg
elif opt == '--altmethod': analysis_method=arg
elif opt == '--altp': p_threshold=arg
elif opt == '--probetype': filter_probeset_types=arg
elif opt == '--altscore': alt_exon_fold_variable=arg
elif opt == '--GEcutoff': gene_expression_cutoff=arg
elif opt == '--removeIntronOnlyJunctions': remove_intronic_junctions=arg
elif opt == '--normCounts': normalize_feature_exp=arg
elif opt == '--normMatrix': normalize_gene_data=arg
elif opt == '--altpermutep': permute_p_threshold=arg
elif opt == '--altpermute': perform_permutation_analysis=arg
elif opt == '--exportnormexp': export_NI_values=arg
elif opt == '--buildExonExportFile': build_exon_bedfile = 'yes'
elif opt == '--runMarkerFinder': marker_finder = arg
elif opt == '--calcNIp': calculate_normIntensity_p=arg
elif opt == '--runMiDAS': run_MiDAS=arg
elif opt == '--analyzeAllGroups':
analyze_all_conditions=arg
if analyze_all_conditions == 'yes': analyze_all_conditions = 'all groups'
elif opt == '--GEcutoff': use_direct_domain_alignments_only=arg
elif opt == '--mirmethod': microRNA_prediction_method=arg
elif opt == '--ASfilter': filter_for_AS=arg
elif opt == '--noxhyb': xhyb_remove=arg
elif opt == '--returnAll': return_all=arg
elif opt == '--annotatedir': external_annotation_dir=arg
elif opt == '--additionalScore': additional_score=arg
elif opt == '--additionalAlgorithm': additional_algorithms=arg
elif opt == '--modelSize':
modelSize=arg
try: modelSize = int(modelSize)
except Exception: modelSize = None
elif opt == '--geneModel':
geneModel=arg # file location
if geneModel == 'no' or 'alse' in geneModel:
geneModel = False
elif opt == '--reference':
custom_reference = arg
if run_from_scratch == 'Process Feature Extraction files': ### Agilent Feature Extraction files as input for normalization
normalize_gene_data = 'quantile' ### required for Agilent
proceed = 'yes'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
if pipelineAnalysis == False:
proceed = 'yes'
if proceed == 'yes':
species_codes = UI.remoteSpeciesInfo()
### Update Ensembl Databases
if 'Official' in update_method:
file_location_defaults = UI.importDefaultFileLocations()
db_versions_vendors,db_versions = UI.remoteOnlineDatabaseVersions()
array_codes = UI.remoteArrayInfo()
UI.getOnlineDBConfig(file_location_defaults,'')
if len(species)==2:
species_names = UI.getSpeciesInfo()
species_full = species_names[species]
else: species_full = species
print 'Species name to update:',species_full
db_version_list=[]
for version in db_versions: db_version_list.append(version)
db_version_list.sort(); db_version_list.reverse(); select_version = db_version_list[0]
db_versions[select_version].sort()
print 'Ensembl version',ensembl_version
if ensembl_version != 'current':
if len(ensembl_version) < 4: ensembl_version = 'EnsMart'+ensembl_version
if ensembl_version not in db_versions:
try: UI.getOnlineEliteDatabase(file_location_defaults,ensembl_version,[species],'no',''); sys.exit()
except Exception:
### This is only for databases that aren't officially released yet (prototyping)
print ensembl_version, 'is not a valid version of Ensembl, while',select_version, 'is.'; sys.exit()
else: select_version = ensembl_version
### Export basic species information
sc = species; db_version = ensembl_version
if sc != None:
for ad in db_versions_vendors[db_version]:
if ad.SpeciesCodes() == species_full:
for array_system in array_codes:
ac = array_codes[array_system]
compatible_species = ac.SpeciesCodes()
if ac.Manufacturer() in ad.Manufacturer() and ('expression' in ac.ArrayName() or 'RNASeq' in ac.ArrayName() or 'RNA-seq' in ac.ArrayName()):
if sc not in compatible_species: compatible_species.append(sc)
ac.setSpeciesCodes(compatible_species)
UI.exportArrayInfo(array_codes)
if species_full not in db_versions[select_version]:
print db_versions[select_version]
print species_full, ': This species is not available for this version %s of the Official database.' % select_version
else:
update_goelite_resources = 'no' ### This is handled separately below
UI.getOnlineEliteDatabase(file_location_defaults,ensembl_version,[species],update_goelite_resources,'');
### Attempt to download additional Ontologies and GeneSets
if additional_resources[0] != None: ### Indicates that the user requested the download of addition GO-Elite resources
try:
import GeneSetDownloader
print 'Adding supplemental GeneSet and Ontology Collections'
if 'all' in additional_resources:
additionalResources = UI.importResourceList() ### Gets all available additional resources
else: additionalResources = additional_resources
GeneSetDownloader.buildAccessoryPathwayDatabases([species],additionalResources,'yes')
print 'Finished adding additional analysis resources.'
except Exception:
print 'Download error encountered for additional Ontologies and GeneSets...\nplease try again later.'
status = UI.verifyLineageProfilerDatabases(species,'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try: UI.checkForLocalArraySupport(species,array_type,specific_array_type,'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65';sys.exit()
status = UI.verifyLineageProfilerDatabases(species,'command-line')
print "Finished adding database"
sys.exit()
try:
#print ge_fold_cutoffs,ge_pvalue_cutoffs, change_threshold, resources_to_analyze, goelite_permutations, p_val_threshold, z_threshold
change_threshold = int(change_threshold)-1
goelite_permutations = int(goelite_permutations);change_threshold = change_threshold
p_val_threshold = float(p_val_threshold); z_threshold = float(z_threshold)
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
except Exception,e:
print e
print 'One of the GO-Elite input values is inappropriate. Please review and correct.';sys.exit()
if run_GOElite == None or run_GOElite == 'no': goelite_permutations = 'NA' ### This prevents GO-Elite from running
else:
if output_dir == None:
print "\nPlease specify an output directory using the flag --output"; sys.exit()
try: expression_threshold = float(expression_threshold)
except Exception: expression_threshold = 1
try: dabg_p = float(dabg_p)
except Exception: dabg_p = 1 ### Occurs for RNASeq
if microRNA_prediction_method == 'two or more': microRNA_prediction_method = 'multiple'
else: microRNA_prediction_method = 'any'
### Run GO-Elite directly from user supplied input and denominator ID folders (outside of the normal workflows)
if run_GOElite == 'yes' and pipelineAnalysis == False and '--runGOElite' in arguments:# and denom_file_dir != None:
#python AltAnalyze.py --input "/Users/nsalomonis/Desktop/Mm_sample/input_list_small" --runGOElite yes --denom "/Users/nsalomonis/Desktop/Mm_sample/denominator" --mod Ensembl --species Mm
"""if denom_file_dir == None:
print 'Please include a folder containing a valid denominator ID list for the input ID sets.'; sys.exit()"""
try:
if output_dir==None:
### Set output to the same directory or parent if none selected
i = -1 ### 1 directory up
output_dir = string.join(string.split(input_file_dir,'/')[:i],'/')
file_dirs = input_file_dir, denom_file_dir, output_dir
import GO_Elite
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
goelite_var = species,mod,goelite_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,''
GO_Elite.remoteAnalysis(goelite_var,'non-UI',Multi=mlp)
sys.exit()
except Exception:
print traceback.format_exc()
print "Unexpected error encountered. Please see log file."; sys.exit()
if run_lineage_profiler == 'yes':
status = UI.verifyLineageProfilerDatabases(species,'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments:
#python AltAnalyze.py --input "/Users/arrays/test.txt" --runLineageProfiler yes --vendor Affymetrix --platform "3'array" --species Mm --output "/Users/nsalomonis/Merrill"
#python AltAnalyze.py --input "/Users/qPCR/samples.txt" --runLineageProfiler yes --geneModel "/Users/qPCR/models.txt"
if array_type==None:
print "Please include a platform name (e.g., --platform RNASeq)";sys.exit()
if species==None:
print "Please include a species name (e.g., --species Hs)";sys.exit()
try:
status = UI.verifyLineageProfilerDatabases(species,'command-line')
except ValueError:
### Occurs due to if int(gene_database[-2:]) < 65: - ValueError: invalid literal for int() with base 10: ''
print '\nPlease install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65\n';sys.exit()
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...';sys.exit()
try:
fl = UI.ExpressionFileLocationData('','','','')
fl.setSpecies(species)
fl.setVendor(manufacturer)
fl.setPlatformType(array_type)
fl.setCompendiumType('protein_coding')
#fl.setCompendiumType('AltExon')
fl.setCompendiumPlatform(array_type)
try: expr_input_dir
except Exception: expr_input_dir = input_file_dir
UI.remoteLP(fl, expr_input_dir, manufacturer, custom_reference, geneModel, None, modelSize=modelSize)
#graphic_links = ExpressionBuilder.remoteLineageProfiler(fl,input_file_dir,array_type,species,manufacturer)
print_out = 'Lineage profiles and images saved to the folder "DataPlots" in the input file folder.'
print print_out
except Exception:
print traceback.format_exc()
print_out = 'Analysis error occurred...\nplease see warning printouts.'
print print_out
sys.exit()
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try: UI.checkForLocalArraySupport(species,array_type,specific_array_type,'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65';sys.exit()
probeset_types = ['full','core','extended']
if return_all == 'yes': ### Perform no alternative exon filtering when annotating existing FIRMA or MADS results
dabg_p = 1; expression_threshold = 1; p_threshold = 1; alt_exon_fold_variable = 1
gene_expression_cutoff = 10000; filter_probeset_types = 'full'; exon_exp_threshold = 1; rpkm_threshold = 0
gene_exp_threshold = 1; exon_rpkm_threshold = 0
if array_type == 'RNASeq':
gene_exp_threshold = 0
else:
if array_type != "3'array":
try:
p_threshold = float(p_threshold); alt_exon_fold_variable = float(alt_exon_fold_variable)
expression_threshold = float(expression_threshold); gene_expression_cutoff = float(gene_expression_cutoff)
dabg_p = float(dabg_p); additional_score = float(additional_score)
gene_expression_cutoff = float(gene_expression_cutoff)
except Exception:
try: gene_expression_cutoff = float(gene_expression_cutoff)
except Exception: gene_expression_cutoff = 0
try: rpkm_threshold = float(rpkm_threshold)
except Exception: rpkm_threshold = -1
try: exon_exp_threshold = float(exon_exp_threshold)
except Exception: exon_exp_threshold = 0
try: gene_exp_threshold = float(gene_exp_threshold)
except Exception: gene_exp_threshold = 0
try: exon_rpkm_threshold = float(exon_rpkm_threshold)
except Exception: exon_rpkm_threshold = 0
if filter_probeset_types not in probeset_types and array_type == 'exon':
print "Invalid probeset-type entered:",filter_probeset_types,'. Must be "full", "extended" or "core"'; sys.exit()
elif array_type == 'gene' and filter_probeset_types == 'NA': filter_probeset_types = 'core'
if dabg_p > 1 or dabg_p <= 0:
print "Invalid DABG p-value entered:",dabg_p,'. Must be > 0 and <= 1'; sys.exit()
if expression_threshold <1:
print "Invalid expression threshold entered:",expression_threshold,'. Must be > 1'; sys.exit()
if p_threshold > 1 or p_threshold <= 0:
print "Invalid alternative exon p-value entered:",p_threshold,'. Must be > 0 and <= 1'; sys.exit()
if alt_exon_fold_variable < 1 and analysis_method != 'ASPIRE':
print "Invalid alternative exon threshold entered:",alt_exon_fold_variable,'. Must be > 1'; sys.exit()
if gene_expression_cutoff < 1:
print "Invalid gene expression threshold entered:",gene_expression_cutoff,'. Must be > 1'; sys.exit()
if additional_score < 1:
print "Invalid additional score threshold entered:",additional_score,'. Must be > 1'; sys.exit()
if array_type == 'RNASeq':
if rpkm_threshold < 0:
print "Invalid gene RPKM threshold entered:",rpkm_threshold,'. Must be >= 0'; sys.exit()
if exon_exp_threshold < 1:
print "Invalid exon expression threshold entered:",exon_exp_threshold,'. Must be > 1'; sys.exit()
if exon_rpkm_threshold < 0:
print "Invalid exon RPKM threshold entered:",exon_rpkm_threshold,'. Must be >= 0'; sys.exit()
if gene_exp_threshold < 1:
print "Invalid gene expression threshold entered:",gene_exp_threshold,'. Must be > 1'; sys.exit()
if 'FIRMA' in additional_algorithms and array_type == 'RNASeq':
print 'FIRMA is not an available option for RNASeq... Changing this to splicing-index.'
additional_algorithms = 'splicing-index'
additional_algorithms = UI.AdditionalAlgorithms(additional_algorithms); additional_algorithms.setScore(additional_score)
if array_type == 'RNASeq':
manufacturer = 'RNASeq'
if 'CEL' in run_from_scratch: run_from_scratch = 'Process RNA-seq reads'
if build_exon_bedfile == 'yes': run_from_scratch = 'buildExonExportFiles'
if run_from_scratch == 'Process AltAnalyze filtered': expression_data_format = 'log' ### This is switched to log no matter what, after initial import and analysis of CEL or BED files
### These variables are modified from the defaults in the module UI as below
excludeNonExpExons = True
if avg_all_for_ss == 'yes': avg_all_for_ss = 'yes'
elif 'all exon aligning' in avg_all_for_ss or 'known exons' in avg_all_for_ss or 'expressed exons' in avg_all_for_ss:
if 'known exons' in avg_all_for_ss and array_type == 'RNASeq': excludeNonExpExons = False
avg_all_for_ss = 'yes'
else: avg_all_for_ss = 'no'
if run_MiDAS == 'NA': run_MiDAS = 'no'
if perform_alt_analysis == 'yes': perform_alt_analysis = 'yes'
elif perform_alt_analysis == 'expression': perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'just expression': perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'no': perform_alt_analysis = 'expression'
elif platform != "3'array": perform_alt_analysis = 'both'
if systemToUse != None: array_type = systemToUse
try: permute_p_threshold = float(permute_p_threshold)
except Exception: permute_p_threshold = permute_p_threshold
### Store variables for AltAnalyzeMain
expr_var = species,array_type,manufacturer,constitutive_source,dabg_p,expression_threshold,avg_all_for_ss,expression_data_format,include_raw_data,run_from_scratch,perform_alt_analysis
alt_var = analysis_method,p_threshold,filter_probeset_types,alt_exon_fold_variable,gene_expression_cutoff,remove_intronic_junctions,permute_p_threshold,perform_permutation_analysis, export_NI_values, analyze_all_conditions
additional_var = calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms
goelite_var = ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,goelite_permutations,mod,returnPathways
if run_from_scratch == 'buildExonExportFiles':
fl = UI.ExpressionFileLocationData('','','',''); fl.setExonBedBuildStatus('yes'); fl.setFeatureNormalization('none')
fl.setCELFileDir(cel_file_dir); fl.setArrayType(array_type); fl.setOutputDir(output_dir)
fl.setMultiThreading(multiThreading)
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl; parent_dir = output_dir
perform_alt_analysis = 'expression'
if run_from_scratch == 'Process Expression file':
if len(input_exp_file)>0:
if groups_file != None and comps_file != None:
if 'exp.' in input_exp_file: new_exp_file = input_exp_file
else:
new_exp_file = export.findParentDir(input_exp_file)+'exp.'+export.findFilename(input_exp_file)
if 'ExpressionInput' not in new_exp_file:
### This expression file is not currently used (could make it the default after copying to this location)
if output_dir[-1] != '/' and output_dir[-1] != '\\':
output_dir += '/'
new_exp_file = output_dir+'ExpressionInput/'+export.findFilename(new_exp_file)
try: export.copyFile(input_exp_file, new_exp_file)
except Exception: print 'Expression file already present in target location.'
try: export.copyFile(groups_file, string.replace(new_exp_file,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: export.copyFile(comps_file, string.replace(new_exp_file,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(new_exp_file,'exp.','groups.')
comps_file = string.replace(new_exp_file,'exp.','comps.')
input_exp_file = new_exp_file
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
try:
cel_files, array_linker_db = ExpressionBuilder.getArrayHeaders(input_exp_file)
if len(input_stats_file)>1: ###Make sure the files have the same arrays and order first
cel_files2, array_linker_db2 = ExpressionBuilder.getArrayHeaders(input_stats_file)
if cel_files2 != cel_files:
print "The probe set p-value file:\n"+input_stats_file+"\ndoes not have the same array order as the\nexpression file. Correct before proceeding."; sys.exit()
except Exception: print '\nWARNING...Expression file not found: "'+input_exp_file+'"\n\n'; sys.exit()
exp_name = string.replace(exp_name,'exp.',''); dataset_name = exp_name; exp_name = string.replace(exp_name,'.txt','')
groups_name = 'ExpressionInput/groups.'+dataset_name; comps_name = 'ExpressionInput/comps.'+dataset_name
groups_file_dir = output_dir+'/'+groups_name; comps_file_dir = output_dir+'/'+comps_name
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (analyze_all_conditions == 'all groups' and groups_found != 'found'):
files_exported = UI.predictGroupsAndComps(cel_files,output_dir,exp_name)
if files_exported == 'yes': print "AltAnalyze inferred a groups and comps file from the CEL file names."
elif run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments: pass
else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(input_exp_file,input_stats_file,groups_file_dir,comps_file_dir)
dataset_name = exp_name
if analyze_all_conditions == "all groups":
try: array_group_list,group_db = UI.importArrayGroupsSimple(groups_file_dir,cel_files)
except Exception:
print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
exp_file_location_db={}; exp_file_location_db[exp_name]=fl
elif run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'Process Feature Extraction files':
if groups_file != None and comps_file != None:
try: shutil.copyfile(groups_file, string.replace(exp_file_dir,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: shutil.copyfile(comps_file, string.replace(exp_file_dir,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
stats_file_dir = string.replace(exp_file_dir,'exp.','stats.')
groups_file_dir = string.replace(exp_file_dir,'exp.','groups.')
comps_file_dir = string.replace(exp_file_dir,'exp.','comps.')
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (analyze_all_conditions == 'all groups' and groups_found != 'found'):
if mappedExonAnalysis: pass
else:
files_exported = UI.predictGroupsAndComps(cel_files,output_dir,exp_name)
if files_exported == 'yes': print "AltAnalyze inferred a groups and comps file from the CEL file names."
#else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(exp_file_dir,stats_file_dir,groups_file_dir,comps_file_dir)
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
parent_dir = output_dir ### interchangable terms (parent_dir used with expression file import)
if analyze_all_conditions == "all groups":
array_group_list,group_db = UI.importArrayGroupsSimple(groups_file_dir,cel_files)
UI.exportGroups(exp_file_location_db,array_group_list)
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
try: fl.setRunKallisto(input_fastq_dir)
except Exception: pass
elif run_from_scratch == 'Process AltAnalyze filtered':
if '.txt' in input_filtered_dir: ### Occurs if the user tries to load a specific file
dirs = string.split(input_filtered_dir,'/')
input_filtered_dir = string.join(dirs[:-1],'/')
fl = UI.ExpressionFileLocationData('','','',''); dataset_name = 'filtered-exp_dir'
dirs = string.split(input_filtered_dir,'AltExpression'); parent_dir = dirs[0]
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset_name]
file_location_defaults = UI.importDefaultFileLocations()
apt_location = UI.getAPTLocations(file_location_defaults,run_from_scratch,run_MiDAS)
fl.setAPTLocation(apt_location)
if run_from_scratch == 'Process CEL files':
if xhyb_remove == 'yes' and (array_type == 'gene' or array_type == 'junction'): xhyb_remove = 'no' ### This is set when the user mistakenly selects exon array, initially
fl.setInputCDFFile(input_cdf_file); fl.setCLFFile(clf_file); fl.setBGPFile(bgp_file); fl.setXHybRemoval(xhyb_remove)
fl.setCELFileDir(cel_file_dir); fl.setArrayType(array_type_original); fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process RNA-seq reads':
fl.setCELFileDir(cel_file_dir); fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process Feature Extraction files':
fl.setCELFileDir(cel_file_dir); fl.setOutputDir(output_dir)
fl = exp_file_location_db[dataset]; fl.setRootDir(parent_dir)
try: apt_location = fl.APTLocation()
except Exception: apt_location = ''
root_dir = fl.RootDir(); fl.setExonBedBuildStatus(build_exon_bedfile)
fl.setMarkerFinder(marker_finder)
fl.setFeatureNormalization(normalize_feature_exp)
fl.setNormMatrix(normalize_gene_data)
fl.setProbabilityStatistic(probability_statistic)
fl.setProducePlots(visualize_qc_results)
fl.setPerformLineageProfiler(run_lineage_profiler)
fl.setCompendiumType(compendiumType)
fl.setCompendiumPlatform(compendiumPlatform)
fl.setVendor(manufacturer)
try: fl.setFDRStatistic(FDR_statistic)
except Exception: pass
fl.setAnalysisMode('commandline')
fl.setBatchEffectRemoval(batch_effects)
fl.setChannelToExtract(channel_to_extract)
fl.setMultiThreading(multiThreading)
try: fl.setExcludeLowExpressionExons(excludeNonExpExons)
except Exception: fl.setExcludeLowExpressionExons(True)
if 'other' in manufacturer or 'Other' in manufacturer:
### For data without a primary array ID key
manufacturer = "other:3'array"
fl.setVendor(manufacturer)
if array_type == 'RNASeq': ### Post version 2.0, add variables in fl rather than below
fl.setRPKMThreshold(rpkm_threshold)
fl.setExonExpThreshold(exon_exp_threshold)
fl.setGeneExpThreshold(gene_exp_threshold)
fl.setExonRPKMThreshold(exon_rpkm_threshold)
fl.setJunctionExpThreshold(expression_threshold)
fl.setExonMapFile(exonMapFile)
fl.setPlatformType(platformType)
### Verify database presence
try: dirs = unique.read_directory('/AltDatabase')
except Exception: dirs=[]
if species not in dirs:
print '\n'+species,'species not yet installed. Please install before proceeding (e.g., "python AltAnalyze.py --update Official --species',species,'--version EnsMart65").'
global commandLineMode; commandLineMode = 'yes'
AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db,None)
else:
print 'Insufficient Flags entered (requires --species and --output)'
def cleanUpCommandArguments():
### Needed on PC
command_args = string.join(sys.argv,' ')
arguments = string.split(command_args,' --')
for argument in arguments:
"""
argument_list = string.split(argument,' ')
if len(argument_list)>2:
filename = string.join(argument_list[1:],' ')
argument = argument_list[0]+' '+string.replace(filename,' ','$$$')
"""
argument_list = string.split(argument,' ')
#argument = string.join(re.findall(r"\w",argument),'')
if ':' in argument: ### Windows OS
z = string.find(argument_list[1],':')
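### A colon anywhere other than the drive-letter position (index 1) suggests a malformed or unquoted Windows path argument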
if z!= -1 and z!=1: ### Hence, it is in the argument but not at the second position
print 'Illegal parentheses found. Please re-type these and re-run.'; sys.exit()
def runCommandLineVersion():
### This code had to be moved to a separate function to prevent iterative runs upon AltAnalyze.py re-import
command_args = string.join(sys.argv,' ')
#try: cleanUpCommandArguments()
#except Exception: null=[]
print 3,[sys.argv],
if len(sys.argv[1:])>0 and '--' in command_args:
if '--GUI' in command_args:
### Hard-restart of AltAnalyze while preserving the prior parameters
command_arguments = string.split(command_args,' --')
if len(command_arguments)>2:
command_arguments = map(lambda x: string.split(x,' '),command_arguments)
command_arguments = map(lambda (x,y): (x,string.replace(y,'__',' ')),command_arguments[2:])
selected_parameters = [command_arguments[0][1]]
user_variables={}
for (o,v) in command_arguments: user_variables[o]=v
AltAnalyzeSetup((selected_parameters,user_variables))
else:
AltAnalyzeSetup('no') ### a trick to get back to the main page of the GUI (if AltAnalyze has Tkinter conflict)
try:
commandLineRun()
except Exception:
print traceback.format_exc()
###### Determine Command Line versus GUI Control ######
command_args = string.join(sys.argv,' ')
if len(sys.argv[1:])>1 and '-' in command_args: null=[]
else:
try:
import Tkinter
from Tkinter import *
import PmwFreeze
import tkFileDialog
from tkFont import Font
use_Tkinter = 'yes'
except ImportError: use_Tkinter = 'yes'; print "\nPmw or Tkinter not found... Tkinter print out not available";
def testResultsPanel():
import QC
file = "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/3'Array/Merrill/ExpressionInput/exp.test.txt"
#QC.outputArrayQC(file)
global root; root = Tk()
global pathway_permutations; pathway_permutations = 'NA'
global log_file; log_file = 'null.txt'
global array_type; global explicit_data_type
global run_GOElite; run_GOElite = 'run-immediately'
explicit_data_type = 'exon-only'
array_type = 'RNASeq'
fl = UI.ExpressionFileLocationData('','','','')
graphic_links = []
graphic_links.append(['PCA','PCA.png'])
graphic_links.append(['HC','HC.png'])
graphic_links.append(['PCA1','PCA.png'])
graphic_links.append(['HC1','HC.png'])
graphic_links.append(['PCA2','PCA.png'])
graphic_links.append(['HC2','HC.png'])
graphic_links.append(['PCA3','PCA.png'])
graphic_links.append(['HC3','HC.png'])
graphic_links.append(['PCA4','PCA.png'])
graphic_links.append(['HC4','HC.png'])
summary_db={}
summary_db['QC'] = graphic_links
#summary_db={}
fl.setGraphicLinks(graphic_links)
summary_db['gene_assayed'] = 1
summary_db['denominator_exp_genes'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_genes'] = 1
summary_db['direct_domain_genes'] = 1
summary_db['miRNA_gene_hits'] = 1
#summary_db={}
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test'; results_dir=''
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_db)
print 'here'
#sys.exit()
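### Logger tees stdout to the log file: each write goes to the terminal and is appended to log_file,
### reopening the file per write so output is preserved even if AltAnalyze exits abruptly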
class Logger(object):
def __init__(self,null):
self.terminal = sys.stdout
self.log = open(log_file, "w")
def write(self, message):
self.terminal.write(message)
self.log = open(log_file, "a")
self.log.write(message)
self.log.close()
def flush(self): pass
def dependencyCheck():
### Make sure core dependencies for AltAnalyze are met and if not report back
from pkgutil import iter_modules
modules = set(x[1] for x in iter_modules()) ### all installed modules
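### Note: iter_modules only lists top-level packages/modules discoverable on sys.path, which is sufficient for this check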
dependent_modules = ['string','csv','base64','getpass','requests']
dependent_modules += ['math','warnings','sklearn','time','os','webbrowser']
dependent_modules += ['scipy','numpy','matplotlib','igraph','pandas','patsy']
dependent_modules += ['ImageTk','PIL','cairo','wx','fastcluster','pysam', 'Tkinter']
print ''
count=0
for module in dependent_modules:
if module not in modules:
print 'AltAnalyze dependency not met for:',module
if 'fastcluster' == module:
print '...Faster hierarchical cluster not supported without fastcluster'
if 'pysam' == module:
print '...BAM file access not supported without pysam'
if 'scipy' == module:
print '...Many required statistical routines not supported without scipy'
if 'numpy' == module:
print '...Many required statistical routines not supported without numpy'
if 'matplotlib' == module:
print '...Core graphical outputs not supported without matplotlib'
if 'requests' == module:
print '...Wikipathways visualization not supported without requests'
if 'lxml' == module:
print '...Wikipathways visualization not supported without lxml'
if 'wx' == module:
print '...The AltAnalyze Results Viewer requires wx'
if 'ImageTk' == module or 'PIL' == module:
print '...Some graphical results displays require ImageTk and PIL'
if 'Tkinter' == module:
print '...AltAnalyze graphical user interface mode requires Tkinter'
if 'igraph' == module or 'cairo' == module:
print '...Network visualization requires igraph and cairo'
if 'sklearn' == module:
print '...t-SNE analysis requires sklearn'
if 'pandas' == module or 'patsy' == module:
print '...Combat batch effects correction requires pandas and patsy'
count+=1
if count>0:
print '\nWARNING!!!! Some dependencies are not currently met.'
print "This will likely impact some of AltAnalyze's performance\n"
if __name__ == '__main__':
try: mlp.freeze_support()
except Exception: pass
#testResultsPanel()
skip_intro = 'yes'; #sys.exit()
#skip_intro = 'remoteViewer'
runCommandLineVersion()
dependencyCheck()
if use_Tkinter == 'yes': AltAnalyzeSetup(skip_intro)
""" To do list:
0) (done) Integrate new network visualization functionality in clustering
1) RNA-Seq and LineageProfiler: threshold based RPKM expression filtering for binary absent present gene and exon calls
2) (demo) Splicing graph/isoform visualization
3) SQLite for gene-set databases prior to clustering and network visualization
4) (done) Gene-level correlation queries for clustering
5) (explored - not good) Optional algorithm type of PCA
6) (done) Optional normalization of expression data for clustering
7) (partially) Integrate splicing factor enrichment analysis (separate module?)
8) (done) Venn diagram option
9) (done) Additional Analyses: (A) combine lists, (B) annotate ID list, (C) run marker finder directly, (D) any graph from table option, (E) network from SIF, (F) inference networks from gene-lists (protein-protein, protein-DNA, protein-splicing)
10) Optional denominator option for GO-Elite (create from input and ID system IDs)
11) Update fields in summary combined alt.exon files (key by probeset)
12) Check field names for junction, exon, RNA-Seq in summary alt.exon report
13) (done) Support additional ID types for initial import (ID select option and pulldown - Other)
14) Proper FDR p-value for alt.exon analyses (include all computed p-values)
15) Add all major clustering and LineageProfiler options to UI along with stats filtering by default
16) (done) Make GO-Elite analysis the default
17) Support R check (and response that they need it) along with GUI gcrma, agilent array, hopach, combat
18) Probe-level annotations from Ensembl (partial code in place) and probe-level RMA in R (or possibly APT) - google pgf for U133 array
19) (done) Include various gene databases for LineageProfiler in download and allow for custom databases to be used (markerFinder based)
20) (done) Quantile normalization option for any non-Affy, non-RNASeq data (check box)
21) (done) Import agilent from Feature extraction files (pull-down option)
22) Update the software from the software
Advantages of this tool kit:
0) Easiest to use, hands down
1) Established and novel functionality for transcriptome/proteomics analysis built in
2) Independent and cooperative options for RNA-Seq and array analysis (splicing and gene expression)
3) Superior functional analyses (TF-target, splicing-factor target, lineage markers, WikiPathway visualization)
4) Options for different levels of users with different integration options (multiple statistical method options, option R support)
5) Built in secondary analysis options for already processed data (graphing, clustering, biomarker discovery, pathway analysis, network visualization)
6) Incorporates highly validated alternative exon identification methods, independent and jointly
Primary Engineer Work:
0) C-library calls and/or multithreading where applicable to improve performance.
1) MySQL or equivalent transition for all large database queries (e.g., HuEx 2.1 on-the-fly coordinate mapping).
2) Splicing-domain visualization (matplotlib).
3) Isoform-domain network visualization and WP overlays.
4) Webservice calls to in silico protein translation, domain prediction, splicing factor regulation.
5) Stand-alone integration with bedtools, QC tools, TopHat, Cufflinks, Miso (optional).
### 2.0.9
Monocle integration
generic and cell classification machine learning
PCR primer design (gene centric after file selection)
BAM->BED (local SAMTools)
updated APT
"""
|
mirrorview.py
|
import sys
from decimal import Decimal
import cv2
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from controller.viewcontroller import ViewController
from threading import Thread
from queue import Queue
qtCreatorFile = "views/mirrorwindow.ui" # Enter file here.
Ui_MirrorWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
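# uic.loadUiType compiles the Qt Designer .ui file at runtime and returns (form class, Qt base class) to inherit from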
class MirrorWindow(QtWidgets.QMainWindow, Ui_MirrorWindow):
def __init__(self, viewController: ViewController):
QtWidgets.QMainWindow.__init__(self)
Ui_MirrorWindow.__init__(self)
self.setupUi(self)
self.left_queue = Queue()
self.right_queue = Queue()
self.running = True
self.cam_thread_1 = Thread(
target=self.startCameraFeed,
args=(0, 1920/4, 1080/3, 30,))
self.cam_thread_1.start()
self.window_width = self.vidLeftMirror.frameSize().width()
self.window_height = self.vidLeftMirror.frameSize().height()
self.viewController = viewController
self.left_image = ImageContainer(self.vidLeftMirror)
self.right_image = ImageContainer(self.vidRightMirror)
self.btnBack.clicked.connect(self.end)
# Drive frame updates from the Qt event loop via a QTimer; update_frame must run on the GUI thread
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.update_frame)
self.timer.start(1)
def end(self):
self.running = False
self.viewController.show_back()
def startCameraFeed(self, cam, width, height, fps):
capture = cv2.VideoCapture(cam)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
capture.set(cv2.CAP_PROP_FPS, fps)
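# Producer loop: grab frames and push them onto the bounded queue; frames are dropped once ~10 are pending
# so the GUI consumer (update_frame) never falls far behind the camera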
while self.running:
frame = {}
retval, img = capture.read()
frame['img'] = img
if self.right_queue.qsize() < 10:
self.right_queue.put(frame)
else:
print(self.right_queue.qsize())
def calculate_tax(self):
print("Mirrro")
def update_frame(self):
#while(True):
if not self.right_queue.empty():
frame = self.right_queue.get()
img = frame['img']
img_height, img_width, img_colors = img.shape
scale_w = float(self.window_width) / float(img_width)
scale_h = float(self.window_height) / float(img_height)
scale = min([scale_w, scale_h])
if scale == 0:
scale = 1
#img = cv2.resize(img, None, fx=scale, fy=scale, interpolation = cv2.INTER_CUBIC)
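# The active resize below stretches the frame to the widget dimensions (aspect ratio not preserved);
# the computed `scale` would only be needed for the aspect-preserving variant commented out above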
img = cv2.resize(img, (self.window_width, self.window_height), interpolation = cv2.INTER_CUBIC)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height, width, bpc = img.shape
bpl = bpc * width
image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
self.right_image.setImage(image)
self.left_image.setImage(image)
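# Simple widget that stores the most recent QImage and repaints it at (0, 0) in paintEvent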
class ImageContainer(QtWidgets.QWidget):
def __init__(self, parent=None):
super(ImageContainer, self).__init__(parent)
self.image = None
def setImage(self, image):
self.image = image
sz = image.size()
self.setMinimumSize(sz)
self.update()
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
if self.image:
qp.drawImage(QtCore.QPoint(0, 0), self.image)
qp.end()
|